Diffstat (limited to 'arch')
100 files changed, 1187 insertions, 804 deletions
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index e8a761aee088..f939794363ac 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h | |||
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
108 | " lda $31,3b-2b(%0)\n" | 108 | " lda $31,3b-2b(%0)\n" |
109 | " .previous\n" | 109 | " .previous\n" |
110 | : "+r"(ret), "=&r"(prev), "=&r"(cmp) | 110 | : "+r"(ret), "=&r"(prev), "=&r"(cmp) |
111 | : "r"(uaddr), "r"((long)oldval), "r"(newval) | 111 | : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) |
112 | : "memory"); | 112 | : "memory"); |
113 | 113 | ||
114 | *uval = prev; | 114 | *uval = prev; |
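Note on the alpha futex change above: Alpha's 32-bit loads (ldl/ldl_l) sign-extend into the 64-bit register, so prev comes back sign-extended; the comparison operand passed into the inline asm has to be sign-extended the same way, otherwise the cmpxchg can fail spuriously for oldval values with bit 31 set. A minimal user-space illustration of the difference (assumes 64-bit long; not code from this patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int oldval = 0x80000000u;       /* bit 31 set */
            long zero_ext = (long)oldval;            /* 0x0000000080000000 */
            long sign_ext = (long)(int)oldval;       /* 0xffffffff80000000 */

            printf("%lx %lx\n", zero_ext, sign_ext);
            return 0;
    }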
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a48aecc17eac..dfb0312f4e73 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1280,7 +1280,7 @@ config ARM_ERRATA_743622 | |||
1280 | depends on CPU_V7 | 1280 | depends on CPU_V7 |
1281 | help | 1281 | help |
1282 | This option enables the workaround for the 743622 Cortex-A9 | 1282 | This option enables the workaround for the 743622 Cortex-A9 |
1283 | (r2p0..r2p2) erratum. Under very rare conditions, a faulty | 1283 | (r2p*) erratum. Under very rare conditions, a faulty |
1284 | optimisation in the Cortex-A9 Store Buffer may lead to data | 1284 | optimisation in the Cortex-A9 Store Buffer may lead to data |
1285 | corruption. This workaround sets a specific bit in the diagnostic | 1285 | corruption. This workaround sets a specific bit in the diagnostic |
1286 | register of the Cortex-A9 which disables the Store Buffer | 1286 | register of the Cortex-A9 which disables the Store Buffer |
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore index ce1c5ff746e7..3c79f85975aa 100644 --- a/arch/arm/boot/.gitignore +++ b/arch/arm/boot/.gitignore | |||
@@ -3,3 +3,4 @@ zImage | |||
3 | xipImage | 3 | xipImage |
4 | bootpImage | 4 | bootpImage |
5 | uImage | 5 | uImage |
6 | *.dtb | ||
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index b5a5be2536c1..90114faa9f3c 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h | |||
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); | |||
134 | 134 | ||
135 | u64 armpmu_event_update(struct perf_event *event, | 135 | u64 armpmu_event_update(struct perf_event *event, |
136 | struct hw_perf_event *hwc, | 136 | struct hw_perf_event *hwc, |
137 | int idx, int overflow); | 137 | int idx); |
138 | 138 | ||
139 | int armpmu_event_set_period(struct perf_event *event, | 139 | int armpmu_event_set_period(struct perf_event *event, |
140 | struct hw_perf_event *hwc, | 140 | struct hw_perf_event *hwc, |
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index 4dd0edab6a65..1651d4950744 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c | |||
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm) | |||
242 | 242 | ||
243 | memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); | 243 | memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); |
244 | 244 | ||
245 | vma.vm_flags = VM_EXEC; | ||
245 | vma.vm_mm = mm; | 246 | vma.vm_mm = mm; |
246 | 247 | ||
247 | flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); | 248 | flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index a23c42abc694..8a89d3b7626b 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event, | |||
180 | u64 | 180 | u64 |
181 | armpmu_event_update(struct perf_event *event, | 181 | armpmu_event_update(struct perf_event *event, |
182 | struct hw_perf_event *hwc, | 182 | struct hw_perf_event *hwc, |
183 | int idx, int overflow) | 183 | int idx) |
184 | { | 184 | { |
185 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 185 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
186 | u64 delta, prev_raw_count, new_raw_count; | 186 | u64 delta, prev_raw_count, new_raw_count; |
@@ -193,13 +193,7 @@ again: | |||
193 | new_raw_count) != prev_raw_count) | 193 | new_raw_count) != prev_raw_count) |
194 | goto again; | 194 | goto again; |
195 | 195 | ||
196 | new_raw_count &= armpmu->max_period; | 196 | delta = (new_raw_count - prev_raw_count) & armpmu->max_period; |
197 | prev_raw_count &= armpmu->max_period; | ||
198 | |||
199 | if (overflow) | ||
200 | delta = armpmu->max_period - prev_raw_count + new_raw_count + 1; | ||
201 | else | ||
202 | delta = new_raw_count - prev_raw_count; | ||
203 | 197 | ||
204 | local64_add(delta, &event->count); | 198 | local64_add(delta, &event->count); |
205 | local64_sub(delta, &hwc->period_left); | 199 | local64_sub(delta, &hwc->period_left); |
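The armpmu_event_update() rework above drops the overflow argument: instead of choosing between two formulas based on a flag supplied by the caller, the new code relies on modular arithmetic, because masking the unsigned difference with max_period gives the elapsed count whether or not the counter wrapped, provided less than one full period passed between the two reads (which the halved default sample_period below helps guarantee). A rough sketch of the idea, reusing the kernel's u64 type:

    /*
     * Sketch only: wraparound-safe delta for a counter whose valid bits
     * are described by max_period (e.g. 0xffffffff for a 32-bit counter).
     */
    static u64 counter_delta(u64 prev, u64 now, u64 max_period)
    {
            /*
             * Unsigned subtraction wraps modulo 2^64; masking reduces it
             * modulo the counter width, which equals the elapsed count
             * even when the hardware counter overflowed in between.
             */
            return (now - prev) & max_period;
    }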
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event) | |||
216 | if (hwc->idx < 0) | 210 | if (hwc->idx < 0) |
217 | return; | 211 | return; |
218 | 212 | ||
219 | armpmu_event_update(event, hwc, hwc->idx, 0); | 213 | armpmu_event_update(event, hwc, hwc->idx); |
220 | } | 214 | } |
221 | 215 | ||
222 | static void | 216 | static void |
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags) | |||
232 | if (!(hwc->state & PERF_HES_STOPPED)) { | 226 | if (!(hwc->state & PERF_HES_STOPPED)) { |
233 | armpmu->disable(hwc, hwc->idx); | 227 | armpmu->disable(hwc, hwc->idx); |
234 | barrier(); /* why? */ | 228 | barrier(); /* why? */ |
235 | armpmu_event_update(event, hwc, hwc->idx, 0); | 229 | armpmu_event_update(event, hwc, hwc->idx); |
236 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | 230 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
237 | } | 231 | } |
238 | } | 232 | } |
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event) | |||
518 | hwc->config_base |= (unsigned long)mapping; | 512 | hwc->config_base |= (unsigned long)mapping; |
519 | 513 | ||
520 | if (!hwc->sample_period) { | 514 | if (!hwc->sample_period) { |
521 | hwc->sample_period = armpmu->max_period; | 515 | /* |
516 | * For non-sampling runs, limit the sample_period to half | ||
517 | * of the counter width. That way, the new counter value | ||
518 | * is far less likely to overtake the previous one unless | ||
519 | * you have some serious IRQ latency issues. | ||
520 | */ | ||
521 | hwc->sample_period = armpmu->max_period >> 1; | ||
522 | hwc->last_period = hwc->sample_period; | 522 | hwc->last_period = hwc->sample_period; |
523 | local64_set(&hwc->period_left, hwc->sample_period); | 523 | local64_set(&hwc->period_left, hwc->sample_period); |
524 | } | 524 | } |
@@ -684,6 +684,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu) | |||
684 | } | 684 | } |
685 | 685 | ||
686 | /* | 686 | /* |
687 | * PMU hardware loses all context when a CPU goes offline. | ||
688 | * When a CPU is hotplugged back in, since some hardware registers are | ||
689 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading | ||
690 | * junk values out of them. | ||
691 | */ | ||
692 | static int __cpuinit pmu_cpu_notify(struct notifier_block *b, | ||
693 | unsigned long action, void *hcpu) | ||
694 | { | ||
695 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) | ||
696 | return NOTIFY_DONE; | ||
697 | |||
698 | if (cpu_pmu && cpu_pmu->reset) | ||
699 | cpu_pmu->reset(NULL); | ||
700 | |||
701 | return NOTIFY_OK; | ||
702 | } | ||
703 | |||
704 | static struct notifier_block __cpuinitdata pmu_cpu_notifier = { | ||
705 | .notifier_call = pmu_cpu_notify, | ||
706 | }; | ||
707 | |||
708 | /* | ||
687 | * CPU PMU identification and registration. | 709 | * CPU PMU identification and registration. |
688 | */ | 710 | */ |
689 | static int __init | 711 | static int __init |
@@ -734,6 +756,7 @@ init_hw_perf_events(void) | |||
734 | pr_info("enabled with %s PMU driver, %d counters available\n", | 756 | pr_info("enabled with %s PMU driver, %d counters available\n", |
735 | cpu_pmu->name, cpu_pmu->num_events); | 757 | cpu_pmu->name, cpu_pmu->num_events); |
736 | cpu_pmu_init(cpu_pmu); | 758 | cpu_pmu_init(cpu_pmu); |
759 | register_cpu_notifier(&pmu_cpu_notifier); | ||
737 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); | 760 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); |
738 | } else { | 761 | } else { |
739 | pr_info("no hardware support available\n"); | 762 | pr_info("no hardware support available\n"); |
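One detail worth calling out in the hotplug notifier added above: CPU notifier actions carry CPU_TASKS_FROZEN as a modifier bit during suspend/resume, so masking it off lets a single test cover both CPU_STARTING and CPU_STARTING_FROZEN, and CPU_STARTING callbacks run on the CPU that is coming online with interrupts disabled, which is why the reset callback can safely touch that CPU's PMU registers directly. The same check written as a switch, purely as an illustration (not a proposed change):

    switch (action & ~CPU_TASKS_FROZEN) {
    case CPU_STARTING:
            /* Runs on the incoming CPU: put the PMU into a known state. */
            if (cpu_pmu && cpu_pmu->reset)
                    cpu_pmu->reset(NULL);
            return NOTIFY_OK;
    default:
            return NOTIFY_DONE;
    }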
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 533be9930ec2..b78af0cc6ef3 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c | |||
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, | |||
467 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 467 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
468 | } | 468 | } |
469 | 469 | ||
470 | static int counter_is_active(unsigned long pmcr, int idx) | ||
471 | { | ||
472 | unsigned long mask = 0; | ||
473 | if (idx == ARMV6_CYCLE_COUNTER) | ||
474 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
475 | else if (idx == ARMV6_COUNTER0) | ||
476 | mask = ARMV6_PMCR_COUNT0_IEN; | ||
477 | else if (idx == ARMV6_COUNTER1) | ||
478 | mask = ARMV6_PMCR_COUNT1_IEN; | ||
479 | |||
480 | if (mask) | ||
481 | return pmcr & mask; | ||
482 | |||
483 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | static irqreturn_t | 470 | static irqreturn_t |
488 | armv6pmu_handle_irq(int irq_num, | 471 | armv6pmu_handle_irq(int irq_num, |
489 | void *dev) | 472 | void *dev) |
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num, | |||
513 | struct perf_event *event = cpuc->events[idx]; | 496 | struct perf_event *event = cpuc->events[idx]; |
514 | struct hw_perf_event *hwc; | 497 | struct hw_perf_event *hwc; |
515 | 498 | ||
516 | if (!counter_is_active(pmcr, idx)) | 499 | /* Ignore if we don't have an event. */ |
500 | if (!event) | ||
517 | continue; | 501 | continue; |
518 | 502 | ||
519 | /* | 503 | /* |
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num, | |||
524 | continue; | 508 | continue; |
525 | 509 | ||
526 | hwc = &event->hw; | 510 | hwc = &event->hw; |
527 | armpmu_event_update(event, hwc, idx, 1); | 511 | armpmu_event_update(event, hwc, idx); |
528 | data.period = event->hw.last_period; | 512 | data.period = event->hw.last_period; |
529 | if (!armpmu_event_set_period(event, hwc, idx)) | 513 | if (!armpmu_event_set_period(event, hwc, idx)) |
530 | continue; | 514 | continue; |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 6933244c68f9..4d7095af2ab3 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx) | |||
809 | 809 | ||
810 | counter = ARMV7_IDX_TO_COUNTER(idx); | 810 | counter = ARMV7_IDX_TO_COUNTER(idx); |
811 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); | 811 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); |
812 | isb(); | ||
813 | /* Clear the overflow flag in case an interrupt is pending. */ | ||
814 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter))); | ||
815 | isb(); | ||
816 | |||
812 | return idx; | 817 | return idx; |
813 | } | 818 | } |
814 | 819 | ||
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
955 | struct perf_event *event = cpuc->events[idx]; | 960 | struct perf_event *event = cpuc->events[idx]; |
956 | struct hw_perf_event *hwc; | 961 | struct hw_perf_event *hwc; |
957 | 962 | ||
963 | /* Ignore if we don't have an event. */ | ||
964 | if (!event) | ||
965 | continue; | ||
966 | |||
958 | /* | 967 | /* |
959 | * We have a single interrupt for all counters. Check that | 968 | * We have a single interrupt for all counters. Check that |
960 | * each counter has overflowed before we process it. | 969 | * each counter has overflowed before we process it. |
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
963 | continue; | 972 | continue; |
964 | 973 | ||
965 | hwc = &event->hw; | 974 | hwc = &event->hw; |
966 | armpmu_event_update(event, hwc, idx, 1); | 975 | armpmu_event_update(event, hwc, idx); |
967 | data.period = event->hw.last_period; | 976 | data.period = event->hw.last_period; |
968 | if (!armpmu_event_set_period(event, hwc, idx)) | 977 | if (!armpmu_event_set_period(event, hwc, idx)) |
969 | continue; | 978 | continue; |
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 3b99d8269829..71a21e6712f5 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c | |||
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
255 | struct perf_event *event = cpuc->events[idx]; | 255 | struct perf_event *event = cpuc->events[idx]; |
256 | struct hw_perf_event *hwc; | 256 | struct hw_perf_event *hwc; |
257 | 257 | ||
258 | if (!event) | ||
259 | continue; | ||
260 | |||
258 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) | 261 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) |
259 | continue; | 262 | continue; |
260 | 263 | ||
261 | hwc = &event->hw; | 264 | hwc = &event->hw; |
262 | armpmu_event_update(event, hwc, idx, 1); | 265 | armpmu_event_update(event, hwc, idx); |
263 | data.period = event->hw.last_period; | 266 | data.period = event->hw.last_period; |
264 | if (!armpmu_event_set_period(event, hwc, idx)) | 267 | if (!armpmu_event_set_period(event, hwc, idx)) |
265 | continue; | 268 | continue; |
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
592 | struct perf_event *event = cpuc->events[idx]; | 595 | struct perf_event *event = cpuc->events[idx]; |
593 | struct hw_perf_event *hwc; | 596 | struct hw_perf_event *hwc; |
594 | 597 | ||
595 | if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) | 598 | if (!event) |
599 | continue; | ||
600 | |||
601 | if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx)) | ||
596 | continue; | 602 | continue; |
597 | 603 | ||
598 | hwc = &event->hw; | 604 | hwc = &event->hw; |
599 | armpmu_event_update(event, hwc, idx, 1); | 605 | armpmu_event_update(event, hwc, idx); |
600 | data.period = event->hw.last_period; | 606 | data.period = event->hw.last_period; |
601 | if (!armpmu_event_set_period(event, hwc, idx)) | 607 | if (!armpmu_event_set_period(event, hwc, idx)) |
602 | continue; | 608 | continue; |
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
663 | static void | 669 | static void |
664 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | 670 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) |
665 | { | 671 | { |
666 | unsigned long flags, ien, evtsel; | 672 | unsigned long flags, ien, evtsel, of_flags; |
667 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 673 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
668 | 674 | ||
669 | ien = xscale2pmu_read_int_enable(); | 675 | ien = xscale2pmu_read_int_enable(); |
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
672 | switch (idx) { | 678 | switch (idx) { |
673 | case XSCALE_CYCLE_COUNTER: | 679 | case XSCALE_CYCLE_COUNTER: |
674 | ien &= ~XSCALE2_CCOUNT_INT_EN; | 680 | ien &= ~XSCALE2_CCOUNT_INT_EN; |
681 | of_flags = XSCALE2_CCOUNT_OVERFLOW; | ||
675 | break; | 682 | break; |
676 | case XSCALE_COUNTER0: | 683 | case XSCALE_COUNTER0: |
677 | ien &= ~XSCALE2_COUNT0_INT_EN; | 684 | ien &= ~XSCALE2_COUNT0_INT_EN; |
678 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; | 685 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; |
679 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; | 686 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; |
687 | of_flags = XSCALE2_COUNT0_OVERFLOW; | ||
680 | break; | 688 | break; |
681 | case XSCALE_COUNTER1: | 689 | case XSCALE_COUNTER1: |
682 | ien &= ~XSCALE2_COUNT1_INT_EN; | 690 | ien &= ~XSCALE2_COUNT1_INT_EN; |
683 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; | 691 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; |
684 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; | 692 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; |
693 | of_flags = XSCALE2_COUNT1_OVERFLOW; | ||
685 | break; | 694 | break; |
686 | case XSCALE_COUNTER2: | 695 | case XSCALE_COUNTER2: |
687 | ien &= ~XSCALE2_COUNT2_INT_EN; | 696 | ien &= ~XSCALE2_COUNT2_INT_EN; |
688 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; | 697 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; |
689 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; | 698 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; |
699 | of_flags = XSCALE2_COUNT2_OVERFLOW; | ||
690 | break; | 700 | break; |
691 | case XSCALE_COUNTER3: | 701 | case XSCALE_COUNTER3: |
692 | ien &= ~XSCALE2_COUNT3_INT_EN; | 702 | ien &= ~XSCALE2_COUNT3_INT_EN; |
693 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; | 703 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; |
694 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; | 704 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; |
705 | of_flags = XSCALE2_COUNT3_OVERFLOW; | ||
695 | break; | 706 | break; |
696 | default: | 707 | default: |
697 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | 708 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); |
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
701 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 712 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
702 | xscale2pmu_write_event_select(evtsel); | 713 | xscale2pmu_write_event_select(evtsel); |
703 | xscale2pmu_write_int_enable(ien); | 714 | xscale2pmu_write_int_enable(ien); |
715 | xscale2pmu_write_overflow_flags(of_flags); | ||
704 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 716 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
705 | } | 717 | } |
706 | 718 | ||
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index b7582dd10dc3..96e2adcd5a84 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
@@ -38,10 +38,6 @@ | |||
38 | #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE) | 38 | #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE) |
39 | static u64 hdmac_dmamask = DMA_BIT_MASK(32); | 39 | static u64 hdmac_dmamask = DMA_BIT_MASK(32); |
40 | 40 | ||
41 | static struct at_dma_platform_data atdma_pdata = { | ||
42 | .nr_channels = 8, | ||
43 | }; | ||
44 | |||
45 | static struct resource hdmac_resources[] = { | 41 | static struct resource hdmac_resources[] = { |
46 | [0] = { | 42 | [0] = { |
47 | .start = AT91SAM9G45_BASE_DMA, | 43 | .start = AT91SAM9G45_BASE_DMA, |
@@ -56,12 +52,11 @@ static struct resource hdmac_resources[] = { | |||
56 | }; | 52 | }; |
57 | 53 | ||
58 | static struct platform_device at_hdmac_device = { | 54 | static struct platform_device at_hdmac_device = { |
59 | .name = "at_hdmac", | 55 | .name = "at91sam9g45_dma", |
60 | .id = -1, | 56 | .id = -1, |
61 | .dev = { | 57 | .dev = { |
62 | .dma_mask = &hdmac_dmamask, | 58 | .dma_mask = &hdmac_dmamask, |
63 | .coherent_dma_mask = DMA_BIT_MASK(32), | 59 | .coherent_dma_mask = DMA_BIT_MASK(32), |
64 | .platform_data = &atdma_pdata, | ||
65 | }, | 60 | }, |
66 | .resource = hdmac_resources, | 61 | .resource = hdmac_resources, |
67 | .num_resources = ARRAY_SIZE(hdmac_resources), | 62 | .num_resources = ARRAY_SIZE(hdmac_resources), |
@@ -69,9 +64,15 @@ static struct platform_device at_hdmac_device = { | |||
69 | 64 | ||
70 | void __init at91_add_device_hdmac(void) | 65 | void __init at91_add_device_hdmac(void) |
71 | { | 66 | { |
72 | dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask); | 67 | #if defined(CONFIG_OF) |
73 | dma_cap_set(DMA_SLAVE, atdma_pdata.cap_mask); | 68 | struct device_node *of_node = |
74 | platform_device_register(&at_hdmac_device); | 69 | of_find_node_by_name(NULL, "dma-controller"); |
70 | |||
71 | if (of_node) | ||
72 | of_node_put(of_node); | ||
73 | else | ||
74 | #endif | ||
75 | platform_device_register(&at_hdmac_device); | ||
75 | } | 76 | } |
76 | #else | 77 | #else |
77 | void __init at91_add_device_hdmac(void) {} | 78 | void __init at91_add_device_hdmac(void) {} |
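Taken together with the at91sam9rl change below, the two hdmac hunks drop the at_dma_platform_data (nr_channels, cap_mask) and give each SoC's DMA controller a distinct device name; the assumption is that the at_hdmac driver can then carry the per-SoC details itself, for example keyed off its platform_device_id table, roughly like this (illustrative, not part of this patch):

    /*
     * Assumption about the consuming driver, shown only to motivate the
     * rename: per-SoC configuration keyed off the device name.
     */
    static const struct platform_device_id atdma_devtypes[] = {
            { .name = "at91sam9rl_dma",  .driver_data = 0 /* SoC-specific config */ },
            { .name = "at91sam9g45_dma", .driver_data = 0 /* SoC-specific config */ },
            { /* sentinel */ }
    };

The registration helper also now skips the legacy platform device whenever the device tree already provides a "dma-controller" node, so DT and board-file boots do not register the controller twice.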
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index 61908dce9784..9be71c11d0f0 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c | |||
@@ -33,10 +33,6 @@ | |||
33 | #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE) | 33 | #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE) |
34 | static u64 hdmac_dmamask = DMA_BIT_MASK(32); | 34 | static u64 hdmac_dmamask = DMA_BIT_MASK(32); |
35 | 35 | ||
36 | static struct at_dma_platform_data atdma_pdata = { | ||
37 | .nr_channels = 2, | ||
38 | }; | ||
39 | |||
40 | static struct resource hdmac_resources[] = { | 36 | static struct resource hdmac_resources[] = { |
41 | [0] = { | 37 | [0] = { |
42 | .start = AT91SAM9RL_BASE_DMA, | 38 | .start = AT91SAM9RL_BASE_DMA, |
@@ -51,12 +47,11 @@ static struct resource hdmac_resources[] = { | |||
51 | }; | 47 | }; |
52 | 48 | ||
53 | static struct platform_device at_hdmac_device = { | 49 | static struct platform_device at_hdmac_device = { |
54 | .name = "at_hdmac", | 50 | .name = "at91sam9rl_dma", |
55 | .id = -1, | 51 | .id = -1, |
56 | .dev = { | 52 | .dev = { |
57 | .dma_mask = &hdmac_dmamask, | 53 | .dma_mask = &hdmac_dmamask, |
58 | .coherent_dma_mask = DMA_BIT_MASK(32), | 54 | .coherent_dma_mask = DMA_BIT_MASK(32), |
59 | .platform_data = &atdma_pdata, | ||
60 | }, | 55 | }, |
61 | .resource = hdmac_resources, | 56 | .resource = hdmac_resources, |
62 | .num_resources = ARRAY_SIZE(hdmac_resources), | 57 | .num_resources = ARRAY_SIZE(hdmac_resources), |
@@ -64,7 +59,6 @@ static struct platform_device at_hdmac_device = { | |||
64 | 59 | ||
65 | void __init at91_add_device_hdmac(void) | 60 | void __init at91_add_device_hdmac(void) |
66 | { | 61 | { |
67 | dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask); | ||
68 | platform_device_register(&at_hdmac_device); | 62 | platform_device_register(&at_hdmac_device); |
69 | } | 63 | } |
70 | #else | 64 | #else |
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c index d5fb44f16d31..d67d0b4feb6f 100644 --- a/arch/arm/mach-ep93xx/vision_ep9307.c +++ b/arch/arm/mach-ep93xx/vision_ep9307.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <mach/ep93xx_spi.h> | 34 | #include <mach/ep93xx_spi.h> |
35 | #include <mach/gpio-ep93xx.h> | 35 | #include <mach/gpio-ep93xx.h> |
36 | 36 | ||
37 | #include <asm/hardware/vic.h> | ||
37 | #include <asm/mach-types.h> | 38 | #include <asm/mach-types.h> |
38 | #include <asm/mach/map.h> | 39 | #include <asm/mach/map.h> |
39 | #include <asm/mach/arch.h> | 40 | #include <asm/mach/arch.h> |
@@ -361,6 +362,7 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307") | |||
361 | .atag_offset = 0x100, | 362 | .atag_offset = 0x100, |
362 | .map_io = vision_map_io, | 363 | .map_io = vision_map_io, |
363 | .init_irq = ep93xx_init_irq, | 364 | .init_irq = ep93xx_init_irq, |
365 | .handle_irq = vic_handle_irq, | ||
364 | .timer = &ep93xx_timer, | 366 | .timer = &ep93xx_timer, |
365 | .init_machine = vision_init_machine, | 367 | .init_machine = vision_init_machine, |
366 | .restart = ep93xx_restart, | 368 | .restart = ep93xx_restart, |
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index 0fc65ffde8ff..38939956c34f 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/i2c.h> | 13 | #include <linux/i2c.h> |
14 | #include <linux/gpio_keys.h> | 14 | #include <linux/gpio_keys.h> |
15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
16 | #include <linux/interrupt.h> | ||
16 | #include <linux/fb.h> | 17 | #include <linux/fb.h> |
17 | #include <linux/mfd/max8998.h> | 18 | #include <linux/mfd/max8998.h> |
18 | #include <linux/regulator/machine.h> | 19 | #include <linux/regulator/machine.h> |
@@ -595,6 +596,7 @@ static struct mxt_platform_data qt602240_platform_data = { | |||
595 | .threshold = 0x28, | 596 | .threshold = 0x28, |
596 | .voltage = 2800000, /* 2.8V */ | 597 | .voltage = 2800000, /* 2.8V */ |
597 | .orient = MXT_DIAGONAL, | 598 | .orient = MXT_DIAGONAL, |
599 | .irqflags = IRQF_TRIGGER_FALLING, | ||
598 | }; | 600 | }; |
599 | 601 | ||
600 | static struct i2c_board_info i2c3_devs[] __initdata = { | 602 | static struct i2c_board_info i2c3_devs[] __initdata = { |
diff --git a/arch/arm/mach-lpc32xx/include/mach/irqs.h b/arch/arm/mach-lpc32xx/include/mach/irqs.h index 2667f52e3b04..9e3b90df32e1 100644 --- a/arch/arm/mach-lpc32xx/include/mach/irqs.h +++ b/arch/arm/mach-lpc32xx/include/mach/irqs.h | |||
@@ -61,7 +61,7 @@ | |||
61 | */ | 61 | */ |
62 | #define IRQ_LPC32XX_JTAG_COMM_TX LPC32XX_SIC1_IRQ(1) | 62 | #define IRQ_LPC32XX_JTAG_COMM_TX LPC32XX_SIC1_IRQ(1) |
63 | #define IRQ_LPC32XX_JTAG_COMM_RX LPC32XX_SIC1_IRQ(2) | 63 | #define IRQ_LPC32XX_JTAG_COMM_RX LPC32XX_SIC1_IRQ(2) |
64 | #define IRQ_LPC32XX_GPI_11 LPC32XX_SIC1_IRQ(4) | 64 | #define IRQ_LPC32XX_GPI_28 LPC32XX_SIC1_IRQ(4) |
65 | #define IRQ_LPC32XX_TS_P LPC32XX_SIC1_IRQ(6) | 65 | #define IRQ_LPC32XX_TS_P LPC32XX_SIC1_IRQ(6) |
66 | #define IRQ_LPC32XX_TS_IRQ LPC32XX_SIC1_IRQ(7) | 66 | #define IRQ_LPC32XX_TS_IRQ LPC32XX_SIC1_IRQ(7) |
67 | #define IRQ_LPC32XX_TS_AUX LPC32XX_SIC1_IRQ(8) | 67 | #define IRQ_LPC32XX_TS_AUX LPC32XX_SIC1_IRQ(8) |
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index 4eae566dfdc7..c74de01ab5b6 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c | |||
@@ -118,6 +118,10 @@ static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = { | |||
118 | .event_group = &lpc32xx_event_pin_regs, | 118 | .event_group = &lpc32xx_event_pin_regs, |
119 | .mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT, | 119 | .mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT, |
120 | }, | 120 | }, |
121 | [IRQ_LPC32XX_GPI_28] = { | ||
122 | .event_group = &lpc32xx_event_pin_regs, | ||
123 | .mask = LPC32XX_CLKPWR_EXTSRC_GPI_28_BIT, | ||
124 | }, | ||
121 | [IRQ_LPC32XX_GPIO_00] = { | 125 | [IRQ_LPC32XX_GPIO_00] = { |
122 | .event_group = &lpc32xx_event_int_regs, | 126 | .event_group = &lpc32xx_event_int_regs, |
123 | .mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT, | 127 | .mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT, |
@@ -305,9 +309,18 @@ static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state) | |||
305 | 309 | ||
306 | if (state) | 310 | if (state) |
307 | eventreg |= lpc32xx_events[d->irq].mask; | 311 | eventreg |= lpc32xx_events[d->irq].mask; |
308 | else | 312 | else { |
309 | eventreg &= ~lpc32xx_events[d->irq].mask; | 313 | eventreg &= ~lpc32xx_events[d->irq].mask; |
310 | 314 | ||
315 | /* | ||
316 | * When disabling the wakeup, clear the latched | ||
317 | * event | ||
318 | */ | ||
319 | __raw_writel(lpc32xx_events[d->irq].mask, | ||
320 | lpc32xx_events[d->irq]. | ||
321 | event_group->rawstat_reg); | ||
322 | } | ||
323 | |||
311 | __raw_writel(eventreg, | 324 | __raw_writel(eventreg, |
312 | lpc32xx_events[d->irq].event_group->enab_reg); | 325 | lpc32xx_events[d->irq].event_group->enab_reg); |
313 | 326 | ||
@@ -380,13 +393,15 @@ void __init lpc32xx_init_irq(void) | |||
380 | 393 | ||
381 | /* Setup SIC1 */ | 394 | /* Setup SIC1 */ |
382 | __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE)); | 395 | __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE)); |
383 | __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE)); | 396 | __raw_writel(SIC1_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE)); |
384 | __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE)); | 397 | __raw_writel(SIC1_ATR_DEFAULT, |
398 | LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE)); | ||
385 | 399 | ||
386 | /* Setup SIC2 */ | 400 | /* Setup SIC2 */ |
387 | __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE)); | 401 | __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE)); |
388 | __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE)); | 402 | __raw_writel(SIC2_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE)); |
389 | __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE)); | 403 | __raw_writel(SIC2_ATR_DEFAULT, |
404 | LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE)); | ||
390 | 405 | ||
391 | /* Configure supported IRQ's */ | 406 | /* Configure supported IRQ's */ |
392 | for (i = 0; i < NR_IRQS; i++) { | 407 | for (i = 0; i < NR_IRQS; i++) { |
diff --git a/arch/arm/mach-lpc32xx/serial.c b/arch/arm/mach-lpc32xx/serial.c index 429cfdbb2b3d..f2735281616a 100644 --- a/arch/arm/mach-lpc32xx/serial.c +++ b/arch/arm/mach-lpc32xx/serial.c | |||
@@ -88,6 +88,7 @@ struct uartinit { | |||
88 | char *uart_ck_name; | 88 | char *uart_ck_name; |
89 | u32 ck_mode_mask; | 89 | u32 ck_mode_mask; |
90 | void __iomem *pdiv_clk_reg; | 90 | void __iomem *pdiv_clk_reg; |
91 | resource_size_t mapbase; | ||
91 | }; | 92 | }; |
92 | 93 | ||
93 | static struct uartinit uartinit_data[] __initdata = { | 94 | static struct uartinit uartinit_data[] __initdata = { |
@@ -97,6 +98,7 @@ static struct uartinit uartinit_data[] __initdata = { | |||
97 | .ck_mode_mask = | 98 | .ck_mode_mask = |
98 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 5), | 99 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 5), |
99 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL, | 100 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL, |
101 | .mapbase = LPC32XX_UART5_BASE, | ||
100 | }, | 102 | }, |
101 | #endif | 103 | #endif |
102 | #ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT | 104 | #ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT |
@@ -105,6 +107,7 @@ static struct uartinit uartinit_data[] __initdata = { | |||
105 | .ck_mode_mask = | 107 | .ck_mode_mask = |
106 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 3), | 108 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 3), |
107 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL, | 109 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL, |
110 | .mapbase = LPC32XX_UART3_BASE, | ||
108 | }, | 111 | }, |
109 | #endif | 112 | #endif |
110 | #ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT | 113 | #ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT |
@@ -113,6 +116,7 @@ static struct uartinit uartinit_data[] __initdata = { | |||
113 | .ck_mode_mask = | 116 | .ck_mode_mask = |
114 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 4), | 117 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 4), |
115 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL, | 118 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL, |
119 | .mapbase = LPC32XX_UART4_BASE, | ||
116 | }, | 120 | }, |
117 | #endif | 121 | #endif |
118 | #ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT | 122 | #ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT |
@@ -121,6 +125,7 @@ static struct uartinit uartinit_data[] __initdata = { | |||
121 | .ck_mode_mask = | 125 | .ck_mode_mask = |
122 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 6), | 126 | LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 6), |
123 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL, | 127 | .pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL, |
128 | .mapbase = LPC32XX_UART6_BASE, | ||
124 | }, | 129 | }, |
125 | #endif | 130 | #endif |
126 | }; | 131 | }; |
@@ -165,11 +170,24 @@ void __init lpc32xx_serial_init(void) | |||
165 | 170 | ||
166 | /* pre-UART clock divider set to 1 */ | 171 | /* pre-UART clock divider set to 1 */ |
167 | __raw_writel(0x0101, uartinit_data[i].pdiv_clk_reg); | 172 | __raw_writel(0x0101, uartinit_data[i].pdiv_clk_reg); |
173 | |||
174 | /* | ||
175 | * Force a flush of the RX FIFOs to work around a | ||
176 | * HW bug | ||
177 | */ | ||
178 | puart = uartinit_data[i].mapbase; | ||
179 | __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); | ||
180 | __raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart)); | ||
181 | j = LPC32XX_SUART_FIFO_SIZE; | ||
182 | while (j--) | ||
183 | tmp = __raw_readl( | ||
184 | LPC32XX_UART_DLL_FIFO(puart)); | ||
185 | __raw_writel(0, LPC32XX_UART_IIR_FCR(puart)); | ||
168 | } | 186 | } |
169 | 187 | ||
170 | /* This needs to be done after all UART clocks are setup */ | 188 | /* This needs to be done after all UART clocks are setup */ |
171 | __raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE); | 189 | __raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE); |
172 | for (i = 0; i < ARRAY_SIZE(uartinit_data) - 1; i++) { | 190 | for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) { |
173 | /* Force a flush of the RX FIFOs to work around a HW bug */ | 191 | /* Force a flush of the RX FIFOs to work around a HW bug */ |
174 | puart = serial_std_platform_data[i].mapbase; | 192 | puart = serial_std_platform_data[i].mapbase; |
175 | __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); | 193 | __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); |
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c index 17cb76060125..3588a5584153 100644 --- a/arch/arm/mach-mmp/aspenite.c +++ b/arch/arm/mach-mmp/aspenite.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/mtd/partitions.h> | 17 | #include <linux/mtd/partitions.h> |
18 | #include <linux/mtd/nand.h> | 18 | #include <linux/mtd/nand.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/gpio.h> | ||
21 | 20 | ||
22 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
23 | #include <asm/mach/arch.h> | 22 | #include <asm/mach/arch.h> |
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index 7bc17eaa12eb..ada1213982b4 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <mach/dma.h> | 24 | #include <mach/dma.h> |
25 | #include <mach/devices.h> | 25 | #include <mach/devices.h> |
26 | #include <mach/mfp.h> | 26 | #include <mach/mfp.h> |
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
29 | #include <mach/pxa168.h> | 28 | #include <mach/pxa168.h> |
30 | 29 | ||
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c index 8e3b5af04a57..bc97170125bf 100644 --- a/arch/arm/mach-mmp/tavorevb.c +++ b/arch/arm/mach-mmp/tavorevb.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/smc91x.h> | 14 | #include <linux/smc91x.h> |
15 | #include <linux/gpio.h> | ||
16 | 15 | ||
17 | #include <asm/mach-types.h> | 16 | #include <asm/mach-types.h> |
18 | #include <asm/mach/arch.h> | 17 | #include <asm/mach/arch.h> |
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c index 309369ea6978..be2002f42dea 100644 --- a/arch/arm/mach-omap1/board-innovator.c +++ b/arch/arm/mach-omap1/board-innovator.c | |||
@@ -416,13 +416,13 @@ static void __init innovator_init(void) | |||
416 | #ifdef CONFIG_ARCH_OMAP15XX | 416 | #ifdef CONFIG_ARCH_OMAP15XX |
417 | if (cpu_is_omap1510()) { | 417 | if (cpu_is_omap1510()) { |
418 | omap1_usb_init(&innovator1510_usb_config); | 418 | omap1_usb_init(&innovator1510_usb_config); |
419 | innovator_config[1].data = &innovator1510_lcd_config; | 419 | innovator_config[0].data = &innovator1510_lcd_config; |
420 | } | 420 | } |
421 | #endif | 421 | #endif |
422 | #ifdef CONFIG_ARCH_OMAP16XX | 422 | #ifdef CONFIG_ARCH_OMAP16XX |
423 | if (cpu_is_omap1610()) { | 423 | if (cpu_is_omap1610()) { |
424 | omap1_usb_init(&h2_usb_config); | 424 | omap1_usb_init(&h2_usb_config); |
425 | innovator_config[1].data = &innovator1610_lcd_config; | 425 | innovator_config[0].data = &innovator1610_lcd_config; |
426 | } | 426 | } |
427 | #endif | 427 | #endif |
428 | omap_board_config = innovator_config; | 428 | omap_board_config = innovator_config; |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index d965da45160e..e20c8ab80b0e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -364,8 +364,8 @@ config OMAP3_SDRC_AC_TIMING | |||
364 | going on could result in system crashes; | 364 | going on could result in system crashes; |
365 | 365 | ||
366 | config OMAP4_ERRATA_I688 | 366 | config OMAP4_ERRATA_I688 |
367 | bool "OMAP4 errata: Async Bridge Corruption (BROKEN)" | 367 | bool "OMAP4 errata: Async Bridge Corruption" |
368 | depends on ARCH_OMAP4 && BROKEN | 368 | depends on ARCH_OMAP4 |
369 | select ARCH_HAS_BARRIERS | 369 | select ARCH_HAS_BARRIERS |
370 | help | 370 | help |
371 | If a data is stalled inside asynchronous bridge because of back | 371 | If a data is stalled inside asynchronous bridge because of back |
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 42a4d11fad23..672262717601 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c | |||
@@ -371,7 +371,11 @@ static void n8x0_mmc_callback(void *data, u8 card_mask) | |||
371 | else | 371 | else |
372 | *openp = 0; | 372 | *openp = 0; |
373 | 373 | ||
374 | #ifdef CONFIG_MMC_OMAP | ||
374 | omap_mmc_notify_cover_event(mmc_device, index, *openp); | 375 | omap_mmc_notify_cover_event(mmc_device, index, *openp); |
376 | #else | ||
377 | pr_warn("MMC: notify cover event not available\n"); | ||
378 | #endif | ||
375 | } | 379 | } |
376 | 380 | ||
377 | static int n8x0_mmc_late_init(struct device *dev) | 381 | static int n8x0_mmc_late_init(struct device *dev) |
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index c775bead1497..c877236a8442 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c | |||
@@ -381,7 +381,7 @@ static int omap3evm_twl_gpio_setup(struct device *dev, | |||
381 | gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI"); | 381 | gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI"); |
382 | 382 | ||
383 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ | 383 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ |
384 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 384 | gpio_leds[0].gpio = gpio + TWL4030_GPIO_MAX + 1; |
385 | 385 | ||
386 | platform_device_register(&leds_gpio); | 386 | platform_device_register(&leds_gpio); |
387 | 387 | ||
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index febffde2ff10..7e9338e8d684 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h | |||
@@ -132,6 +132,7 @@ void omap3_map_io(void); | |||
132 | void am33xx_map_io(void); | 132 | void am33xx_map_io(void); |
133 | void omap4_map_io(void); | 133 | void omap4_map_io(void); |
134 | void ti81xx_map_io(void); | 134 | void ti81xx_map_io(void); |
135 | void omap_barriers_init(void); | ||
135 | 136 | ||
136 | /** | 137 | /** |
137 | * omap_test_timeout - busy-loop, testing a condition | 138 | * omap_test_timeout - busy-loop, testing a condition |
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index cfdbb86bc84e..72e018b9b260 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c | |||
@@ -65,7 +65,6 @@ static int omap4_enter_idle(struct cpuidle_device *dev, | |||
65 | struct timespec ts_preidle, ts_postidle, ts_idle; | 65 | struct timespec ts_preidle, ts_postidle, ts_idle; |
66 | u32 cpu1_state; | 66 | u32 cpu1_state; |
67 | int idle_time; | 67 | int idle_time; |
68 | int new_state_idx; | ||
69 | int cpu_id = smp_processor_id(); | 68 | int cpu_id = smp_processor_id(); |
70 | 69 | ||
71 | /* Used to keep track of the total time in idle */ | 70 | /* Used to keep track of the total time in idle */ |
@@ -84,8 +83,8 @@ static int omap4_enter_idle(struct cpuidle_device *dev, | |||
84 | */ | 83 | */ |
85 | cpu1_state = pwrdm_read_pwrst(cpu1_pd); | 84 | cpu1_state = pwrdm_read_pwrst(cpu1_pd); |
86 | if (cpu1_state != PWRDM_POWER_OFF) { | 85 | if (cpu1_state != PWRDM_POWER_OFF) { |
87 | new_state_idx = drv->safe_state_index; | 86 | index = drv->safe_state_index; |
88 | cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]); | 87 | cx = cpuidle_get_statedata(&dev->states_usage[index]); |
89 | } | 88 | } |
90 | 89 | ||
91 | if (index > 0) | 90 | if (index > 0) |
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c index 997033129d26..bbb870c04a5e 100644 --- a/arch/arm/mach-omap2/gpmc-smsc911x.c +++ b/arch/arm/mach-omap2/gpmc-smsc911x.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/smsc911x.h> | 21 | #include <linux/smsc911x.h> |
22 | #include <linux/regulator/fixed.h> | ||
23 | #include <linux/regulator/machine.h> | ||
22 | 24 | ||
23 | #include <plat/board.h> | 25 | #include <plat/board.h> |
24 | #include <plat/gpmc.h> | 26 | #include <plat/gpmc.h> |
@@ -42,6 +44,50 @@ static struct smsc911x_platform_config gpmc_smsc911x_config = { | |||
42 | .flags = SMSC911X_USE_16BIT, | 44 | .flags = SMSC911X_USE_16BIT, |
43 | }; | 45 | }; |
44 | 46 | ||
47 | static struct regulator_consumer_supply gpmc_smsc911x_supply[] = { | ||
48 | REGULATOR_SUPPLY("vddvario", "smsc911x.0"), | ||
49 | REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), | ||
50 | }; | ||
51 | |||
52 | /* Generic regulator definition to satisfy smsc911x */ | ||
53 | static struct regulator_init_data gpmc_smsc911x_reg_init_data = { | ||
54 | .constraints = { | ||
55 | .min_uV = 3300000, | ||
56 | .max_uV = 3300000, | ||
57 | .valid_modes_mask = REGULATOR_MODE_NORMAL | ||
58 | | REGULATOR_MODE_STANDBY, | ||
59 | .valid_ops_mask = REGULATOR_CHANGE_MODE | ||
60 | | REGULATOR_CHANGE_STATUS, | ||
61 | }, | ||
62 | .num_consumer_supplies = ARRAY_SIZE(gpmc_smsc911x_supply), | ||
63 | .consumer_supplies = gpmc_smsc911x_supply, | ||
64 | }; | ||
65 | |||
66 | static struct fixed_voltage_config gpmc_smsc911x_fixed_reg_data = { | ||
67 | .supply_name = "gpmc_smsc911x", | ||
68 | .microvolts = 3300000, | ||
69 | .gpio = -EINVAL, | ||
70 | .startup_delay = 0, | ||
71 | .enable_high = 0, | ||
72 | .enabled_at_boot = 1, | ||
73 | .init_data = &gpmc_smsc911x_reg_init_data, | ||
74 | }; | ||
75 | |||
76 | /* | ||
77 | * Platform device id of 42 is a temporary fix to avoid conflicts | ||
78 | * with other reg-fixed-voltage devices. The real fix should | ||
79 | * involve the driver core providing a way of dynamically | ||
80 | * assigning a unique id on registration for platform devices | ||
81 | * in the same name space. | ||
82 | */ | ||
83 | static struct platform_device gpmc_smsc911x_regulator = { | ||
84 | .name = "reg-fixed-voltage", | ||
85 | .id = 42, | ||
86 | .dev = { | ||
87 | .platform_data = &gpmc_smsc911x_fixed_reg_data, | ||
88 | }, | ||
89 | }; | ||
90 | |||
45 | /* | 91 | /* |
46 | * Initialize smsc911x device connected to the GPMC. Note that we | 92 | * Initialize smsc911x device connected to the GPMC. Note that we |
47 | * assume that pin multiplexing is done in the board-*.c file, | 93 | * assume that pin multiplexing is done in the board-*.c file, |
@@ -55,6 +101,12 @@ void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data) | |||
55 | 101 | ||
56 | gpmc_cfg = board_data; | 102 | gpmc_cfg = board_data; |
57 | 103 | ||
104 | ret = platform_device_register(&gpmc_smsc911x_regulator); | ||
105 | if (ret < 0) { | ||
106 | pr_err("Unable to register smsc911x regulators: %d\n", ret); | ||
107 | return; | ||
108 | } | ||
109 | |||
58 | if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) { | 110 | if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) { |
59 | pr_err("Failed to request GPMC mem region\n"); | 111 | pr_err("Failed to request GPMC mem region\n"); |
60 | return; | 112 | return; |
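Background for the fixed regulator added above: the smsc911x driver expects "vddvario" and "vdd33a" supplies when it probes, so boards where the chip is hard-wired to a 3.3 V rail need a dummy fixed regulator for the probe to succeed rather than fail on a missing supply. The consumer side this satisfies looks roughly like the following (illustrative; the actual lookups live in the smsc911x driver, not in this patch):

    struct regulator *vdd33a;
    int ret;

    vdd33a = regulator_get(dev, "vdd33a");   /* matched via the REGULATOR_SUPPLY entries above */
    if (IS_ERR(vdd33a))
            return PTR_ERR(vdd33a);
    ret = regulator_enable(vdd33a);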
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index b40c28895298..19dd1657245c 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c | |||
@@ -428,6 +428,7 @@ static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, | |||
428 | return 0; | 428 | return 0; |
429 | } | 429 | } |
430 | 430 | ||
431 | static int omap_hsmmc_done; | ||
431 | #define MAX_OMAP_MMC_HWMOD_NAME_LEN 16 | 432 | #define MAX_OMAP_MMC_HWMOD_NAME_LEN 16 |
432 | 433 | ||
433 | void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr) | 434 | void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr) |
@@ -491,6 +492,11 @@ void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers) | |||
491 | { | 492 | { |
492 | u32 reg; | 493 | u32 reg; |
493 | 494 | ||
495 | if (omap_hsmmc_done) | ||
496 | return; | ||
497 | |||
498 | omap_hsmmc_done = 1; | ||
499 | |||
494 | if (!cpu_is_omap44xx()) { | 500 | if (!cpu_is_omap44xx()) { |
495 | if (cpu_is_omap2430()) { | 501 | if (cpu_is_omap2430()) { |
496 | control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE; | 502 | control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE; |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 6c5826605eae..719ee423abe2 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -343,6 +343,7 @@ static void __init omap3_check_revision(const char **cpu_rev) | |||
343 | case 0xb944: | 343 | case 0xb944: |
344 | omap_revision = AM335X_REV_ES1_0; | 344 | omap_revision = AM335X_REV_ES1_0; |
345 | *cpu_rev = "1.0"; | 345 | *cpu_rev = "1.0"; |
346 | break; | ||
346 | case 0xb8f2: | 347 | case 0xb8f2: |
347 | switch (rev) { | 348 | switch (rev) { |
348 | case 0: | 349 | case 0: |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index eb50c29fb644..fb11b44fbdec 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c | |||
@@ -307,6 +307,7 @@ void __init omapam33xx_map_common_io(void) | |||
307 | void __init omap44xx_map_common_io(void) | 307 | void __init omap44xx_map_common_io(void) |
308 | { | 308 | { |
309 | iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); | 309 | iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); |
310 | omap_barriers_init(); | ||
310 | } | 311 | } |
311 | #endif | 312 | #endif |
312 | 313 | ||
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 609ea2ded7e3..415a6f1cf419 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c | |||
@@ -281,8 +281,16 @@ static struct omap_mbox mbox_iva_info = { | |||
281 | .ops = &omap2_mbox_ops, | 281 | .ops = &omap2_mbox_ops, |
282 | .priv = &omap2_mbox_iva_priv, | 282 | .priv = &omap2_mbox_iva_priv, |
283 | }; | 283 | }; |
284 | #endif | ||
284 | 285 | ||
285 | struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL }; | 286 | #ifdef CONFIG_ARCH_OMAP2 |
287 | struct omap_mbox *omap2_mboxes[] = { | ||
288 | &mbox_dsp_info, | ||
289 | #ifdef CONFIG_SOC_OMAP2420 | ||
290 | &mbox_iva_info, | ||
291 | #endif | ||
292 | NULL | ||
293 | }; | ||
286 | #endif | 294 | #endif |
287 | 295 | ||
288 | #if defined(CONFIG_ARCH_OMAP4) | 296 | #if defined(CONFIG_ARCH_OMAP4) |
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index fb8bc9fa43b1..611a0e3d54ca 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c | |||
@@ -218,7 +218,7 @@ static int _omap_mux_get_by_name(struct omap_mux_partition *partition, | |||
218 | return -ENODEV; | 218 | return -ENODEV; |
219 | } | 219 | } |
220 | 220 | ||
221 | static int __init | 221 | static int |
222 | omap_mux_get_by_name(const char *muxname, | 222 | omap_mux_get_by_name(const char *muxname, |
223 | struct omap_mux_partition **found_partition, | 223 | struct omap_mux_partition **found_partition, |
224 | struct omap_mux **found_mux) | 224 | struct omap_mux **found_mux) |
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index b8822048e409..ac49384d0285 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c | |||
@@ -150,7 +150,8 @@ err_out: | |||
150 | platform_device_put(omap_iommu_pdev[i]); | 150 | platform_device_put(omap_iommu_pdev[i]); |
151 | return err; | 151 | return err; |
152 | } | 152 | } |
153 | module_init(omap_iommu_init); | 153 | /* must be ready before omap3isp is probed */ |
154 | subsys_initcall(omap_iommu_init); | ||
154 | 155 | ||
155 | static void __exit omap_iommu_exit(void) | 156 | static void __exit omap_iommu_exit(void) |
156 | { | 157 | { |
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 40a8fbc07e4b..70de277f5c15 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c | |||
@@ -24,12 +24,14 @@ | |||
24 | 24 | ||
25 | #include <plat/irqs.h> | 25 | #include <plat/irqs.h> |
26 | #include <plat/sram.h> | 26 | #include <plat/sram.h> |
27 | #include <plat/omap-secure.h> | ||
27 | 28 | ||
28 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
29 | #include <mach/omap-wakeupgen.h> | 30 | #include <mach/omap-wakeupgen.h> |
30 | 31 | ||
31 | #include "common.h" | 32 | #include "common.h" |
32 | #include "omap4-sar-layout.h" | 33 | #include "omap4-sar-layout.h" |
34 | #include <linux/export.h> | ||
33 | 35 | ||
34 | #ifdef CONFIG_CACHE_L2X0 | 36 | #ifdef CONFIG_CACHE_L2X0 |
35 | static void __iomem *l2cache_base; | 37 | static void __iomem *l2cache_base; |
@@ -43,6 +45,9 @@ static void __iomem *sar_ram_base; | |||
43 | 45 | ||
44 | void __iomem *dram_sync, *sram_sync; | 46 | void __iomem *dram_sync, *sram_sync; |
45 | 47 | ||
48 | static phys_addr_t paddr; | ||
49 | static u32 size; | ||
50 | |||
46 | void omap_bus_sync(void) | 51 | void omap_bus_sync(void) |
47 | { | 52 | { |
48 | if (dram_sync && sram_sync) { | 53 | if (dram_sync && sram_sync) { |
@@ -51,19 +56,22 @@ void omap_bus_sync(void) | |||
51 | isb(); | 56 | isb(); |
52 | } | 57 | } |
53 | } | 58 | } |
59 | EXPORT_SYMBOL(omap_bus_sync); | ||
54 | 60 | ||
55 | static int __init omap_barriers_init(void) | 61 | /* Steal one page physical memory for barrier implementation */ |
62 | int __init omap_barrier_reserve_memblock(void) | ||
56 | { | 63 | { |
57 | struct map_desc dram_io_desc[1]; | ||
58 | phys_addr_t paddr; | ||
59 | u32 size; | ||
60 | |||
61 | if (!cpu_is_omap44xx()) | ||
62 | return -ENODEV; | ||
63 | 64 | ||
64 | size = ALIGN(PAGE_SIZE, SZ_1M); | 65 | size = ALIGN(PAGE_SIZE, SZ_1M); |
65 | paddr = arm_memblock_steal(size, SZ_1M); | 66 | paddr = arm_memblock_steal(size, SZ_1M); |
66 | 67 | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | void __init omap_barriers_init(void) | ||
72 | { | ||
73 | struct map_desc dram_io_desc[1]; | ||
74 | |||
67 | dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA; | 75 | dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA; |
68 | dram_io_desc[0].pfn = __phys_to_pfn(paddr); | 76 | dram_io_desc[0].pfn = __phys_to_pfn(paddr); |
69 | dram_io_desc[0].length = size; | 77 | dram_io_desc[0].length = size; |
@@ -75,9 +83,10 @@ static int __init omap_barriers_init(void) | |||
75 | pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n", | 83 | pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n", |
76 | (long long) paddr, dram_io_desc[0].virtual); | 84 | (long long) paddr, dram_io_desc[0].virtual); |
77 | 85 | ||
78 | return 0; | ||
79 | } | 86 | } |
80 | core_initcall(omap_barriers_init); | 87 | #else |
88 | void __init omap_barriers_init(void) | ||
89 | {} | ||
81 | #endif | 90 | #endif |
82 | 91 | ||
83 | void __init gic_init_irq(void) | 92 | void __init gic_init_irq(void) |
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 1881fe915149..5a65dd04aa38 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c | |||
@@ -174,14 +174,17 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, | |||
174 | freq = clk->rate; | 174 | freq = clk->rate; |
175 | clk_put(clk); | 175 | clk_put(clk); |
176 | 176 | ||
177 | rcu_read_lock(); | ||
177 | opp = opp_find_freq_ceil(dev, &freq); | 178 | opp = opp_find_freq_ceil(dev, &freq); |
178 | if (IS_ERR(opp)) { | 179 | if (IS_ERR(opp)) { |
180 | rcu_read_unlock(); | ||
179 | pr_err("%s: unable to find boot up OPP for vdd_%s\n", | 181 | pr_err("%s: unable to find boot up OPP for vdd_%s\n", |
180 | __func__, vdd_name); | 182 | __func__, vdd_name); |
181 | goto exit; | 183 | goto exit; |
182 | } | 184 | } |
183 | 185 | ||
184 | bootup_volt = opp_get_voltage(opp); | 186 | bootup_volt = opp_get_voltage(opp); |
187 | rcu_read_unlock(); | ||
185 | if (!bootup_volt) { | 188 | if (!bootup_volt) { |
186 | pr_err("%s: unable to find voltage corresponding " | 189 | pr_err("%s: unable to find voltage corresponding " |
187 | "to the bootup OPP for vdd_%s\n", __func__, vdd_name); | 190 | "to the bootup OPP for vdd_%s\n", __func__, vdd_name); |
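The pm.c hunk above adds the locking the OPP layer requires: OPP lookups return RCU-protected pointers, so opp_find_freq_ceil() and any use of the returned opp (here opp_get_voltage()) must happen inside one RCU read-side critical section, with the unlock also covering the early error path. Condensed, the pattern the patch establishes is:

    rcu_read_lock();
    opp = opp_find_freq_ceil(dev, &freq);
    if (IS_ERR(opp)) {
            rcu_read_unlock();
            goto exit;              /* lookup failed; nothing to dereference */
    }
    bootup_volt = opp_get_voltage(opp);
    rcu_read_unlock();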
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 10b20c652e5d..4b57757bf9d1 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c | |||
@@ -270,7 +270,6 @@ static struct regulator_init_data omap4_vusb_idata = { | |||
270 | .constraints = { | 270 | .constraints = { |
271 | .min_uV = 3300000, | 271 | .min_uV = 3300000, |
272 | .max_uV = 3300000, | 272 | .max_uV = 3300000, |
273 | .apply_uV = true, | ||
274 | .valid_modes_mask = REGULATOR_MODE_NORMAL | 273 | .valid_modes_mask = REGULATOR_MODE_NORMAL |
275 | | REGULATOR_MODE_STANDBY, | 274 | | REGULATOR_MODE_STANDBY, |
276 | .valid_ops_mask = REGULATOR_CHANGE_MODE | 275 | .valid_ops_mask = REGULATOR_CHANGE_MODE |
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index 771dc781b746..f51348dafafd 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c | |||
@@ -486,7 +486,7 @@ static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) | |||
486 | void __init usbhs_init(const struct usbhs_omap_board_data *pdata) | 486 | void __init usbhs_init(const struct usbhs_omap_board_data *pdata) |
487 | { | 487 | { |
488 | struct omap_hwmod *oh[2]; | 488 | struct omap_hwmod *oh[2]; |
489 | struct omap_device *od; | 489 | struct platform_device *pdev; |
490 | int bus_id = -1; | 490 | int bus_id = -1; |
491 | int i; | 491 | int i; |
492 | 492 | ||
@@ -522,11 +522,11 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata) | |||
522 | return; | 522 | return; |
523 | } | 523 | } |
524 | 524 | ||
525 | od = omap_device_build_ss(OMAP_USBHS_DEVICE, bus_id, oh, 2, | 525 | pdev = omap_device_build_ss(OMAP_USBHS_DEVICE, bus_id, oh, 2, |
526 | (void *)&usbhs_data, sizeof(usbhs_data), | 526 | (void *)&usbhs_data, sizeof(usbhs_data), |
527 | omap_uhhtll_latency, | 527 | omap_uhhtll_latency, |
528 | ARRAY_SIZE(omap_uhhtll_latency), false); | 528 | ARRAY_SIZE(omap_uhhtll_latency), false); |
529 | if (IS_ERR(od)) { | 529 | if (IS_ERR(pdev)) { |
530 | pr_err("Could not build hwmod devices %s,%s\n", | 530 | pr_err("Could not build hwmod devices %s,%s\n", |
531 | USBHS_UHH_HWMODNAME, USBHS_TLL_HWMODNAME); | 531 | USBHS_UHH_HWMODNAME, USBHS_TLL_HWMODNAME); |
532 | return; | 532 | return; |
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h index 0d729e6619df..42d5cca66257 100644 --- a/arch/arm/mach-pxa/generic.h +++ b/arch/arm/mach-pxa/generic.h | |||
@@ -49,7 +49,6 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int); | |||
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | extern struct syscore_ops pxa_irq_syscore_ops; | 51 | extern struct syscore_ops pxa_irq_syscore_ops; |
52 | extern struct syscore_ops pxa_gpio_syscore_ops; | ||
53 | extern struct syscore_ops pxa2xx_mfp_syscore_ops; | 52 | extern struct syscore_ops pxa2xx_mfp_syscore_ops; |
54 | extern struct syscore_ops pxa3xx_mfp_syscore_ops; | 53 | extern struct syscore_ops pxa3xx_mfp_syscore_ops; |
55 | 54 | ||
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index fb9b62dcf4ca..208eef1c0485 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <mach/hx4700.h> | 45 | #include <mach/hx4700.h> |
46 | #include <mach/irda.h> | 46 | #include <mach/irda.h> |
47 | 47 | ||
48 | #include <sound/ak4641.h> | ||
48 | #include <video/platform_lcd.h> | 49 | #include <video/platform_lcd.h> |
49 | #include <video/w100fb.h> | 50 | #include <video/w100fb.h> |
50 | 51 | ||
@@ -765,6 +766,28 @@ static struct i2c_board_info __initdata pi2c_board_info[] = { | |||
765 | }; | 766 | }; |
766 | 767 | ||
767 | /* | 768 | /* |
769 | * Asahi Kasei AK4641 on I2C | ||
770 | */ | ||
771 | |||
772 | static struct ak4641_platform_data ak4641_info = { | ||
773 | .gpio_power = GPIO27_HX4700_CODEC_ON, | ||
774 | .gpio_npdn = GPIO109_HX4700_CODEC_nPDN, | ||
775 | }; | ||
776 | |||
777 | static struct i2c_board_info i2c_board_info[] __initdata = { | ||
778 | { | ||
779 | I2C_BOARD_INFO("ak4641", 0x12), | ||
780 | .platform_data = &ak4641_info, | ||
781 | }, | ||
782 | }; | ||
783 | |||
784 | static struct platform_device audio = { | ||
785 | .name = "hx4700-audio", | ||
786 | .id = -1, | ||
787 | }; | ||
788 | |||
789 | |||
790 | /* | ||
768 | * PCMCIA | 791 | * PCMCIA |
769 | */ | 792 | */ |
770 | 793 | ||
@@ -790,6 +813,7 @@ static struct platform_device *devices[] __initdata = { | |||
790 | &gpio_vbus, | 813 | &gpio_vbus, |
791 | &power_supply, | 814 | &power_supply, |
792 | &strataflash, | 815 | &strataflash, |
816 | &audio, | ||
793 | &pcmcia, | 817 | &pcmcia, |
794 | }; | 818 | }; |
795 | 819 | ||
@@ -827,6 +851,7 @@ static void __init hx4700_init(void) | |||
827 | pxa_set_ficp_info(&ficp_info); | 851 | pxa_set_ficp_info(&ficp_info); |
828 | pxa27x_set_i2c_power_info(NULL); | 852 | pxa27x_set_i2c_power_info(NULL); |
829 | pxa_set_i2c_info(NULL); | 853 | pxa_set_i2c_info(NULL); |
854 | i2c_register_board_info(0, ARRAY_AND_SIZE(i2c_board_info)); | ||
830 | i2c_register_board_info(1, ARRAY_AND_SIZE(pi2c_board_info)); | 855 | i2c_register_board_info(1, ARRAY_AND_SIZE(pi2c_board_info)); |
831 | pxa2xx_set_spi_info(2, &pxa_ssp2_master_info); | 856 | pxa2xx_set_spi_info(2, &pxa_ssp2_master_info); |
832 | spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info)); | 857 | spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info)); |
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index f14775536b83..29b62afc6f7c 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c | |||
@@ -226,6 +226,12 @@ static void __init pxa25x_mfp_init(void) | |||
226 | { | 226 | { |
227 | int i; | 227 | int i; |
228 | 228 | ||
229 | /* running before pxa_gpio_probe() */ | ||
230 | #ifdef CONFIG_CPU_PXA26x | ||
231 | pxa_last_gpio = 89; | ||
232 | #else | ||
233 | pxa_last_gpio = 84; | ||
234 | #endif | ||
229 | for (i = 0; i <= pxa_last_gpio; i++) | 235 | for (i = 0; i <= pxa_last_gpio; i++) |
230 | gpio_desc[i].valid = 1; | 236 | gpio_desc[i].valid = 1; |
231 | 237 | ||
@@ -295,6 +301,7 @@ static void __init pxa27x_mfp_init(void) | |||
295 | { | 301 | { |
296 | int i, gpio; | 302 | int i, gpio; |
297 | 303 | ||
304 | pxa_last_gpio = 120; /* running before pxa_gpio_probe() */ | ||
298 | for (i = 0; i <= pxa_last_gpio; i++) { | 305 | for (i = 0; i <= pxa_last_gpio; i++) { |
299 | /* skip GPIO2, 5, 6, 7, 8, they are not | 306 | /* skip GPIO2, 5, 6, 7, 8, they are not |
300 | * valid pins allow configuration | 307 | * valid pins allow configuration |
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index 91e4f6c03766..3352b37b60cf 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/suspend.h> | 25 | #include <linux/suspend.h> |
26 | #include <linux/syscore_ops.h> | 26 | #include <linux/syscore_ops.h> |
27 | #include <linux/irq.h> | 27 | #include <linux/irq.h> |
28 | #include <linux/gpio.h> | ||
29 | 28 | ||
30 | #include <asm/mach/map.h> | 29 | #include <asm/mach/map.h> |
31 | #include <asm/suspend.h> | 30 | #include <asm/suspend.h> |
@@ -209,6 +208,7 @@ static struct clk_lookup pxa25x_clkregs[] = { | |||
209 | INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"), | 208 | INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"), |
210 | INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"), | 209 | INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"), |
211 | INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL), | 210 | INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL), |
211 | INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL), | ||
212 | }; | 212 | }; |
213 | 213 | ||
214 | static struct clk_lookup pxa25x_hwuart_clkreg = | 214 | static struct clk_lookup pxa25x_hwuart_clkreg = |
@@ -368,7 +368,6 @@ static int __init pxa25x_init(void) | |||
368 | 368 | ||
369 | register_syscore_ops(&pxa_irq_syscore_ops); | 369 | register_syscore_ops(&pxa_irq_syscore_ops); |
370 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); | 370 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); |
371 | register_syscore_ops(&pxa_gpio_syscore_ops); | ||
372 | register_syscore_ops(&pxa2xx_clock_syscore_ops); | 371 | register_syscore_ops(&pxa2xx_clock_syscore_ops); |
373 | 372 | ||
374 | ret = platform_add_devices(pxa25x_devices, | 373 | ret = platform_add_devices(pxa25x_devices, |
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index aed6cbcf3866..6bce78edce7a 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | #include <linux/gpio.h> | ||
26 | 25 | ||
27 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
28 | #include <mach/hardware.h> | 27 | #include <mach/hardware.h> |
@@ -230,6 +229,7 @@ static struct clk_lookup pxa27x_clkregs[] = { | |||
230 | INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"), | 229 | INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"), |
231 | INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"), | 230 | INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"), |
232 | INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL), | 231 | INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL), |
232 | INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL), | ||
233 | }; | 233 | }; |
234 | 234 | ||
235 | #ifdef CONFIG_PM | 235 | #ifdef CONFIG_PM |
@@ -456,7 +456,6 @@ static int __init pxa27x_init(void) | |||
456 | 456 | ||
457 | register_syscore_ops(&pxa_irq_syscore_ops); | 457 | register_syscore_ops(&pxa_irq_syscore_ops); |
458 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); | 458 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); |
459 | register_syscore_ops(&pxa_gpio_syscore_ops); | ||
460 | register_syscore_ops(&pxa2xx_clock_syscore_ops); | 459 | register_syscore_ops(&pxa2xx_clock_syscore_ops); |
461 | 460 | ||
462 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 461 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 4f402afa6609..3918a672238e 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -462,7 +462,6 @@ static int __init pxa3xx_init(void) | |||
462 | 462 | ||
463 | register_syscore_ops(&pxa_irq_syscore_ops); | 463 | register_syscore_ops(&pxa_irq_syscore_ops); |
464 | register_syscore_ops(&pxa3xx_mfp_syscore_ops); | 464 | register_syscore_ops(&pxa3xx_mfp_syscore_ops); |
465 | register_syscore_ops(&pxa_gpio_syscore_ops); | ||
466 | register_syscore_ops(&pxa3xx_clock_syscore_ops); | 465 | register_syscore_ops(&pxa3xx_clock_syscore_ops); |
467 | 466 | ||
468 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 467 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c index d082a583df78..5ce434b95e87 100644 --- a/arch/arm/mach-pxa/pxa95x.c +++ b/arch/arm/mach-pxa/pxa95x.c | |||
@@ -283,7 +283,6 @@ static int __init pxa95x_init(void) | |||
283 | return ret; | 283 | return ret; |
284 | 284 | ||
285 | register_syscore_ops(&pxa_irq_syscore_ops); | 285 | register_syscore_ops(&pxa_irq_syscore_ops); |
286 | register_syscore_ops(&pxa_gpio_syscore_ops); | ||
287 | register_syscore_ops(&pxa3xx_clock_syscore_ops); | 286 | register_syscore_ops(&pxa3xx_clock_syscore_ops); |
288 | 287 | ||
289 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 288 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
diff --git a/arch/arm/mach-pxa/saarb.c b/arch/arm/mach-pxa/saarb.c index febc809ed5a6..5aded5e6148f 100644 --- a/arch/arm/mach-pxa/saarb.c +++ b/arch/arm/mach-pxa/saarb.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/i2c.h> | 15 | #include <linux/i2c.h> |
16 | #include <linux/i2c/pxa-i2c.h> | 16 | #include <linux/i2c/pxa-i2c.h> |
17 | #include <linux/mfd/88pm860x.h> | 17 | #include <linux/mfd/88pm860x.h> |
18 | #include <linux/gpio.h> | ||
19 | 18 | ||
20 | #include <asm/mach-types.h> | 19 | #include <asm/mach-types.h> |
21 | #include <asm/mach/arch.h> | 20 | #include <asm/mach/arch.h> |
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c index 8d5168d253a9..30989baf7f2a 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.c +++ b/arch/arm/mach-pxa/sharpsl_pm.c | |||
@@ -168,6 +168,7 @@ struct battery_thresh sharpsl_battery_levels_noac[] = { | |||
168 | #define MAXCTRL_SEL_SH 4 | 168 | #define MAXCTRL_SEL_SH 4 |
169 | #define MAXCTRL_STR (1u << 7) | 169 | #define MAXCTRL_STR (1u << 7) |
170 | 170 | ||
171 | extern int max1111_read_channel(int); | ||
171 | /* | 172 | /* |
172 | * Read MAX1111 ADC | 173 | * Read MAX1111 ADC |
173 | */ | 174 | */ |
@@ -177,8 +178,6 @@ int sharpsl_pm_pxa_read_max1111(int channel) | |||
177 | if (machine_is_tosa()) | 178 | if (machine_is_tosa()) |
178 | return 0; | 179 | return 0; |
179 | 180 | ||
180 | extern int max1111_read_channel(int); | ||
181 | |||
182 | /* max1111 accepts channels from 0-3, however, | 181 | /* max1111 accepts channels from 0-3, however, |
183 | * it is encoded from 0-7 here in the code. | 182 | * it is encoded from 0-7 here in the code. |
184 | */ | 183 | */ |
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 34cbdac51525..438f02fe122a 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c | |||
@@ -172,10 +172,9 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm) | |||
172 | static unsigned long spitz_charger_wakeup(void) | 172 | static unsigned long spitz_charger_wakeup(void) |
173 | { | 173 | { |
174 | unsigned long ret; | 174 | unsigned long ret; |
175 | ret = (!gpio_get_value(SPITZ_GPIO_KEY_INT) | 175 | ret = ((!gpio_get_value(SPITZ_GPIO_KEY_INT) |
176 | << GPIO_bit(SPITZ_GPIO_KEY_INT)) | 176 | << GPIO_bit(SPITZ_GPIO_KEY_INT)) |
177 | | (!gpio_get_value(SPITZ_GPIO_SYNC) | 177 | | gpio_get_value(SPITZ_GPIO_SYNC)); |
178 | << GPIO_bit(SPITZ_GPIO_SYNC)); | ||
179 | return ret; | 178 | return ret; |
180 | } | 179 | } |
181 | 180 | ||
diff --git a/arch/arm/mach-s3c2440/common.h b/arch/arm/mach-s3c2440/common.h index db8a98ac68c5..0c1eb1dfc534 100644 --- a/arch/arm/mach-s3c2440/common.h +++ b/arch/arm/mach-s3c2440/common.h | |||
@@ -12,6 +12,6 @@ | |||
12 | #ifndef __ARCH_ARM_MACH_S3C2440_COMMON_H | 12 | #ifndef __ARCH_ARM_MACH_S3C2440_COMMON_H |
13 | #define __ARCH_ARM_MACH_S3C2440_COMMON_H | 13 | #define __ARCH_ARM_MACH_S3C2440_COMMON_H |
14 | 14 | ||
15 | void s3c2440_restart(char mode, const char *cmd); | 15 | void s3c244x_restart(char mode, const char *cmd); |
16 | 16 | ||
17 | #endif /* __ARCH_ARM_MACH_S3C2440_COMMON_H */ | 17 | #endif /* __ARCH_ARM_MACH_S3C2440_COMMON_H */ |
diff --git a/arch/arm/mach-s3c2440/mach-anubis.c b/arch/arm/mach-s3c2440/mach-anubis.c index 24569550de1a..19b577bc09b8 100644 --- a/arch/arm/mach-s3c2440/mach-anubis.c +++ b/arch/arm/mach-s3c2440/mach-anubis.c | |||
@@ -487,5 +487,5 @@ MACHINE_START(ANUBIS, "Simtec-Anubis") | |||
487 | .init_machine = anubis_init, | 487 | .init_machine = anubis_init, |
488 | .init_irq = s3c24xx_init_irq, | 488 | .init_irq = s3c24xx_init_irq, |
489 | .timer = &s3c24xx_timer, | 489 | .timer = &s3c24xx_timer, |
490 | .restart = s3c2440_restart, | 490 | .restart = s3c244x_restart, |
491 | MACHINE_END | 491 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c index d6a9763110cd..d7ae49c90118 100644 --- a/arch/arm/mach-s3c2440/mach-at2440evb.c +++ b/arch/arm/mach-s3c2440/mach-at2440evb.c | |||
@@ -222,5 +222,5 @@ MACHINE_START(AT2440EVB, "AT2440EVB") | |||
222 | .init_machine = at2440evb_init, | 222 | .init_machine = at2440evb_init, |
223 | .init_irq = s3c24xx_init_irq, | 223 | .init_irq = s3c24xx_init_irq, |
224 | .timer = &s3c24xx_timer, | 224 | .timer = &s3c24xx_timer, |
225 | .restart = s3c2440_restart, | 225 | .restart = s3c244x_restart, |
226 | MACHINE_END | 226 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c index 5859e609d28c..9a4a5bc008e6 100644 --- a/arch/arm/mach-s3c2440/mach-gta02.c +++ b/arch/arm/mach-s3c2440/mach-gta02.c | |||
@@ -601,5 +601,5 @@ MACHINE_START(NEO1973_GTA02, "GTA02") | |||
601 | .init_irq = s3c24xx_init_irq, | 601 | .init_irq = s3c24xx_init_irq, |
602 | .init_machine = gta02_machine_init, | 602 | .init_machine = gta02_machine_init, |
603 | .timer = &s3c24xx_timer, | 603 | .timer = &s3c24xx_timer, |
604 | .restart = s3c2440_restart, | 604 | .restart = s3c244x_restart, |
605 | MACHINE_END | 605 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index adbbb85bc4cd..5d66fb218a41 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c | |||
@@ -701,5 +701,5 @@ MACHINE_START(MINI2440, "MINI2440") | |||
701 | .init_machine = mini2440_init, | 701 | .init_machine = mini2440_init, |
702 | .init_irq = s3c24xx_init_irq, | 702 | .init_irq = s3c24xx_init_irq, |
703 | .timer = &s3c24xx_timer, | 703 | .timer = &s3c24xx_timer, |
704 | .restart = s3c2440_restart, | 704 | .restart = s3c244x_restart, |
705 | MACHINE_END | 705 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-nexcoder.c b/arch/arm/mach-s3c2440/mach-nexcoder.c index 40eaf844bc1f..5198e3e1c5be 100644 --- a/arch/arm/mach-s3c2440/mach-nexcoder.c +++ b/arch/arm/mach-s3c2440/mach-nexcoder.c | |||
@@ -158,5 +158,5 @@ MACHINE_START(NEXCODER_2440, "NexVision - Nexcoder 2440") | |||
158 | .init_machine = nexcoder_init, | 158 | .init_machine = nexcoder_init, |
159 | .init_irq = s3c24xx_init_irq, | 159 | .init_irq = s3c24xx_init_irq, |
160 | .timer = &s3c24xx_timer, | 160 | .timer = &s3c24xx_timer, |
161 | .restart = s3c2440_restart, | 161 | .restart = s3c244x_restart, |
162 | MACHINE_END | 162 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c index 4c480ef734f6..c5daeb612a88 100644 --- a/arch/arm/mach-s3c2440/mach-osiris.c +++ b/arch/arm/mach-s3c2440/mach-osiris.c | |||
@@ -436,5 +436,5 @@ MACHINE_START(OSIRIS, "Simtec-OSIRIS") | |||
436 | .init_irq = s3c24xx_init_irq, | 436 | .init_irq = s3c24xx_init_irq, |
437 | .init_machine = osiris_init, | 437 | .init_machine = osiris_init, |
438 | .timer = &s3c24xx_timer, | 438 | .timer = &s3c24xx_timer, |
439 | .restart = s3c2440_restart, | 439 | .restart = s3c244x_restart, |
440 | MACHINE_END | 440 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c index 80077f6472ee..6f68abf44fab 100644 --- a/arch/arm/mach-s3c2440/mach-rx1950.c +++ b/arch/arm/mach-s3c2440/mach-rx1950.c | |||
@@ -822,5 +822,5 @@ MACHINE_START(RX1950, "HP iPAQ RX1950") | |||
822 | .init_irq = s3c24xx_init_irq, | 822 | .init_irq = s3c24xx_init_irq, |
823 | .init_machine = rx1950_init_machine, | 823 | .init_machine = rx1950_init_machine, |
824 | .timer = &s3c24xx_timer, | 824 | .timer = &s3c24xx_timer, |
825 | .restart = s3c2440_restart, | 825 | .restart = s3c244x_restart, |
826 | MACHINE_END | 826 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-rx3715.c b/arch/arm/mach-s3c2440/mach-rx3715.c index 20103bafbd4b..56af35447598 100644 --- a/arch/arm/mach-s3c2440/mach-rx3715.c +++ b/arch/arm/mach-s3c2440/mach-rx3715.c | |||
@@ -213,5 +213,5 @@ MACHINE_START(RX3715, "IPAQ-RX3715") | |||
213 | .init_irq = rx3715_init_irq, | 213 | .init_irq = rx3715_init_irq, |
214 | .init_machine = rx3715_init_machine, | 214 | .init_machine = rx3715_init_machine, |
215 | .timer = &s3c24xx_timer, | 215 | .timer = &s3c24xx_timer, |
216 | .restart = s3c2440_restart, | 216 | .restart = s3c244x_restart, |
217 | MACHINE_END | 217 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/mach-smdk2440.c b/arch/arm/mach-s3c2440/mach-smdk2440.c index 1deb60d12a60..83a1036d7dcb 100644 --- a/arch/arm/mach-s3c2440/mach-smdk2440.c +++ b/arch/arm/mach-s3c2440/mach-smdk2440.c | |||
@@ -183,5 +183,5 @@ MACHINE_START(S3C2440, "SMDK2440") | |||
183 | .map_io = smdk2440_map_io, | 183 | .map_io = smdk2440_map_io, |
184 | .init_machine = smdk2440_machine_init, | 184 | .init_machine = smdk2440_machine_init, |
185 | .timer = &s3c24xx_timer, | 185 | .timer = &s3c24xx_timer, |
186 | .restart = s3c2440_restart, | 186 | .restart = s3c244x_restart, |
187 | MACHINE_END | 187 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c index 517623a09fc5..2b3dddb49af7 100644 --- a/arch/arm/mach-s3c2440/s3c2440.c +++ b/arch/arm/mach-s3c2440/s3c2440.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <plat/cpu.h> | 35 | #include <plat/cpu.h> |
36 | #include <plat/s3c244x.h> | 36 | #include <plat/s3c244x.h> |
37 | #include <plat/pm.h> | 37 | #include <plat/pm.h> |
38 | #include <plat/watchdog-reset.h> | ||
39 | 38 | ||
40 | #include <plat/gpio-core.h> | 39 | #include <plat/gpio-core.h> |
41 | #include <plat/gpio-cfg.h> | 40 | #include <plat/gpio-cfg.h> |
@@ -74,15 +73,3 @@ void __init s3c2440_map_io(void) | |||
74 | s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up; | 73 | s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up; |
75 | s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up; | 74 | s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up; |
76 | } | 75 | } |
77 | |||
78 | void s3c2440_restart(char mode, const char *cmd) | ||
79 | { | ||
80 | if (mode == 's') { | ||
81 | soft_restart(0); | ||
82 | } | ||
83 | |||
84 | arch_wdt_reset(); | ||
85 | |||
86 | /* we'll take a jump through zero as a poor second */ | ||
87 | soft_restart(0); | ||
88 | } | ||
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c index 36bc60f61d0a..d15852f642b7 100644 --- a/arch/arm/mach-s3c2440/s3c244x.c +++ b/arch/arm/mach-s3c2440/s3c244x.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <plat/pm.h> | 46 | #include <plat/pm.h> |
47 | #include <plat/pll.h> | 47 | #include <plat/pll.h> |
48 | #include <plat/nand-core.h> | 48 | #include <plat/nand-core.h> |
49 | #include <plat/watchdog-reset.h> | ||
49 | 50 | ||
50 | static struct map_desc s3c244x_iodesc[] __initdata = { | 51 | static struct map_desc s3c244x_iodesc[] __initdata = { |
51 | IODESC_ENT(CLKPWR), | 52 | IODESC_ENT(CLKPWR), |
@@ -196,3 +197,14 @@ struct syscore_ops s3c244x_pm_syscore_ops = { | |||
196 | .suspend = s3c244x_suspend, | 197 | .suspend = s3c244x_suspend, |
197 | .resume = s3c244x_resume, | 198 | .resume = s3c244x_resume, |
198 | }; | 199 | }; |
200 | |||
201 | void s3c244x_restart(char mode, const char *cmd) | ||
202 | { | ||
203 | if (mode == 's') | ||
204 | soft_restart(0); | ||
205 | |||
206 | arch_wdt_reset(); | ||
207 | |||
208 | /* we'll take a jump through zero as a poor second */ | ||
209 | soft_restart(0); | ||
210 | } | ||
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 52af00446a63..c59e8b892d6b 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig | |||
@@ -5,7 +5,7 @@ config UX500_SOC_COMMON | |||
5 | default y | 5 | default y |
6 | select ARM_GIC | 6 | select ARM_GIC |
7 | select HAS_MTU | 7 | select HAS_MTU |
8 | select ARM_ERRATA_753970 | 8 | select PL310_ERRATA_753970 |
9 | select ARM_ERRATA_754322 | 9 | select ARM_ERRATA_754322 |
10 | select ARM_ERRATA_764369 | 10 | select ARM_ERRATA_764369 |
11 | 11 | ||
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index 9b3d0fbaee72..88c3ba151e87 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig | |||
@@ -7,7 +7,7 @@ config ARCH_VEXPRESS_CA9X4 | |||
7 | select ARM_GIC | 7 | select ARM_GIC |
8 | select ARM_ERRATA_720789 | 8 | select ARM_ERRATA_720789 |
9 | select ARM_ERRATA_751472 | 9 | select ARM_ERRATA_751472 |
10 | select ARM_ERRATA_753970 | 10 | select PL310_ERRATA_753970 |
11 | select HAVE_SMP | 11 | select HAVE_SMP |
12 | select MIGHT_HAVE_CACHE_L2X0 | 12 | select MIGHT_HAVE_CACHE_L2X0 |
13 | 13 | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0404ccbb8aa3..f1c8486f7501 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -230,9 +230,7 @@ __v7_setup: | |||
230 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 230 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
231 | #endif | 231 | #endif |
232 | #ifdef CONFIG_ARM_ERRATA_743622 | 232 | #ifdef CONFIG_ARM_ERRATA_743622 |
233 | teq r6, #0x20 @ present in r2p0 | 233 | teq r5, #0x00200000 @ only present in r2p* |
234 | teqne r6, #0x21 @ present in r2p1 | ||
235 | teqne r6, #0x22 @ present in r2p2 | ||
236 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register | 234 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register |
237 | orreq r10, r10, #1 << 6 @ set bit #6 | 235 | orreq r10, r10, #1 << 6 @ set bit #6 |
238 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 236 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index 06383b51e655..4de7d1e79e73 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c | |||
@@ -69,6 +69,7 @@ void __init omap_reserve(void) | |||
69 | omap_vram_reserve_sdram_memblock(); | 69 | omap_vram_reserve_sdram_memblock(); |
70 | omap_dsp_reserve_sdram_memblock(); | 70 | omap_dsp_reserve_sdram_memblock(); |
71 | omap_secure_ram_reserve_memblock(); | 71 | omap_secure_ram_reserve_memblock(); |
72 | omap_barrier_reserve_memblock(); | ||
72 | } | 73 | } |
73 | 74 | ||
74 | void __init omap_init_consistent_dma_size(void) | 75 | void __init omap_init_consistent_dma_size(void) |
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index 2efd6454bce0..37bbbbb981b2 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h | |||
@@ -428,8 +428,16 @@ | |||
428 | #define OMAP_GPMC_NR_IRQS 8 | 428 | #define OMAP_GPMC_NR_IRQS 8 |
429 | #define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS) | 429 | #define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS) |
430 | 430 | ||
431 | /* PRCM IRQ handler */ | ||
432 | #ifdef CONFIG_ARCH_OMAP2PLUS | ||
433 | #define OMAP_PRCM_IRQ_BASE (OMAP_GPMC_IRQ_END) | ||
434 | #define OMAP_PRCM_NR_IRQS 64 | ||
435 | #define OMAP_PRCM_IRQ_END (OMAP_PRCM_IRQ_BASE + OMAP_PRCM_NR_IRQS) | ||
436 | #else | ||
437 | #define OMAP_PRCM_IRQ_END OMAP_GPMC_IRQ_END | ||
438 | #endif | ||
431 | 439 | ||
432 | #define NR_IRQS OMAP_GPMC_IRQ_END | 440 | #define NR_IRQS OMAP_PRCM_IRQ_END |
433 | 441 | ||
434 | #define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32)) | 442 | #define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32)) |
435 | 443 | ||
diff --git a/arch/arm/plat-omap/include/plat/omap-secure.h b/arch/arm/plat-omap/include/plat/omap-secure.h index 3047ff923a63..8c7994ce9869 100644 --- a/arch/arm/plat-omap/include/plat/omap-secure.h +++ b/arch/arm/plat-omap/include/plat/omap-secure.h | |||
@@ -10,4 +10,10 @@ static inline void omap_secure_ram_reserve_memblock(void) | |||
10 | { } | 10 | { } |
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | #ifdef CONFIG_OMAP4_ERRATA_I688 | ||
14 | extern int omap_barrier_reserve_memblock(void); | ||
15 | #else | ||
16 | static inline void omap_barrier_reserve_memblock(void) | ||
17 | { } | ||
18 | #endif | ||
13 | #endif /* __OMAP_SECURE_H__ */ | 19 | #endif /* __OMAP_SECURE_H__ */ |
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 9fe35348e03b..2bab4c99a234 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c | |||
@@ -1249,7 +1249,7 @@ static void s3c2410_dma_resume(void) | |||
1249 | struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; | 1249 | struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; |
1250 | int channel; | 1250 | int channel; |
1251 | 1251 | ||
1252 | for (channel = dma_channels - 1; channel >= 0; cp++, channel--) | 1252 | for (channel = dma_channels - 1; channel >= 0; cp--, channel--) |
1253 | s3c2410_dma_resume_chan(cp); | 1253 | s3c2410_dma_resume_chan(cp); |
1254 | } | 1254 | } |
1255 | 1255 | ||
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index f10768e988d4..d21d744e4d99 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c | |||
@@ -1409,7 +1409,7 @@ void __init s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd) | |||
1409 | 1409 | ||
1410 | #ifdef CONFIG_S3C_DEV_USB_HSOTG | 1410 | #ifdef CONFIG_S3C_DEV_USB_HSOTG |
1411 | static struct resource s3c_usb_hsotg_resources[] = { | 1411 | static struct resource s3c_usb_hsotg_resources[] = { |
1412 | [0] = DEFINE_RES_MEM(S3C_PA_USB_HSOTG, SZ_16K), | 1412 | [0] = DEFINE_RES_MEM(S3C_PA_USB_HSOTG, SZ_128K), |
1413 | [1] = DEFINE_RES_IRQ(IRQ_OTG), | 1413 | [1] = DEFINE_RES_IRQ(IRQ_OTG), |
1414 | }; | 1414 | }; |
1415 | 1415 | ||
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c index 0c77e4298675..abb5bdecd509 100644 --- a/arch/arm/plat-spear/time.c +++ b/arch/arm/plat-spear/time.c | |||
@@ -145,11 +145,13 @@ static void clockevent_set_mode(enum clock_event_mode mode, | |||
145 | static int clockevent_next_event(unsigned long cycles, | 145 | static int clockevent_next_event(unsigned long cycles, |
146 | struct clock_event_device *clk_event_dev) | 146 | struct clock_event_device *clk_event_dev) |
147 | { | 147 | { |
148 | u16 val; | 148 | u16 val = readw(gpt_base + CR(CLKEVT)); |
149 | |||
150 | if (val & CTRL_ENABLE) | ||
151 | writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT)); | ||
149 | 152 | ||
150 | writew(cycles, gpt_base + LOAD(CLKEVT)); | 153 | writew(cycles, gpt_base + LOAD(CLKEVT)); |
151 | 154 | ||
152 | val = readw(gpt_base + CR(CLKEVT)); | ||
153 | val |= CTRL_ENABLE | CTRL_INT_ENABLE; | 155 | val |= CTRL_ENABLE | CTRL_INT_ENABLE; |
154 | writew(val, gpt_base + CR(CLKEVT)); | 156 | writew(val, gpt_base + CR(CLKEVT)); |
155 | 157 | ||
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h index 8154c4ee8c9c..77ecbded1f37 100644 --- a/arch/c6x/include/asm/processor.h +++ b/arch/c6x/include/asm/processor.h | |||
@@ -122,8 +122,8 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | |||
122 | 122 | ||
123 | extern unsigned long get_wchan(struct task_struct *p); | 123 | extern unsigned long get_wchan(struct task_struct *p); |
124 | 124 | ||
125 | #define KSTK_EIP(tsk) (task_pt_regs(task)->pc) | 125 | #define KSTK_EIP(task) (task_pt_regs(task)->pc) |
126 | #define KSTK_ESP(tsk) (task_pt_regs(task)->sp) | 126 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) |
127 | 127 | ||
128 | #define cpu_relax() do { } while (0) | 128 | #define cpu_relax() do { } while (0) |
129 | 129 | ||
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 7da4d0081487..a7193ae13a5d 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c | |||
@@ -146,7 +146,7 @@ static int __init alchemy_time_init(unsigned int m2int) | |||
146 | cd->shift = 32; | 146 | cd->shift = 32; |
147 | cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); | 147 | cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); |
148 | cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd); | 148 | cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd); |
149 | cd->min_delta_ns = clockevent_delta2ns(8, cd); /* ~0.25ms */ | 149 | cd->min_delta_ns = clockevent_delta2ns(9, cd); /* ~0.28ms */ |
150 | clockevents_register_device(cd); | 150 | clockevents_register_device(cd); |
151 | setup_irq(m2int, &au1x_rtcmatch2_irqaction); | 151 | setup_irq(m2int, &au1x_rtcmatch2_irqaction); |
152 | 152 | ||
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c index 24f546985b69..e21507052066 100644 --- a/arch/mips/ath79/dev-wmac.c +++ b/arch/mips/ath79/dev-wmac.c | |||
@@ -96,7 +96,7 @@ void __init ath79_register_wmac(u8 *cal_data) | |||
96 | { | 96 | { |
97 | if (soc_is_ar913x()) | 97 | if (soc_is_ar913x()) |
98 | ar913x_wmac_setup(); | 98 | ar913x_wmac_setup(); |
99 | if (soc_is_ar933x()) | 99 | else if (soc_is_ar933x()) |
100 | ar933x_wmac_setup(); | 100 | ar933x_wmac_setup(); |
101 | else | 101 | else |
102 | BUG(); | 102 | BUG(); |
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 4479fd669ac1..28c6b276c216 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig | |||
@@ -8,7 +8,7 @@ CONFIG_HIGH_RES_TIMERS=y | |||
8 | # CONFIG_SECCOMP is not set | 8 | # CONFIG_SECCOMP is not set |
9 | CONFIG_USE_OF=y | 9 | CONFIG_USE_OF=y |
10 | CONFIG_EXPERIMENTAL=y | 10 | CONFIG_EXPERIMENTAL=y |
11 | CONFIG_CROSS_COMPILE="mips-linux-gnu-" | 11 | CONFIG_CROSS_COMPILE="" |
12 | # CONFIG_LOCALVERSION_AUTO is not set | 12 | # CONFIG_LOCALVERSION_AUTO is not set |
13 | CONFIG_SYSVIPC=y | 13 | CONFIG_SYSVIPC=y |
14 | CONFIG_POSIX_MQUEUE=y | 14 | CONFIG_POSIX_MQUEUE=y |
@@ -22,7 +22,7 @@ CONFIG_AUDIT=y | |||
22 | CONFIG_CGROUPS=y | 22 | CONFIG_CGROUPS=y |
23 | CONFIG_NAMESPACES=y | 23 | CONFIG_NAMESPACES=y |
24 | CONFIG_BLK_DEV_INITRD=y | 24 | CONFIG_BLK_DEV_INITRD=y |
25 | CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs.xlp" | 25 | CONFIG_INITRAMFS_SOURCE="" |
26 | CONFIG_RD_BZIP2=y | 26 | CONFIG_RD_BZIP2=y |
27 | CONFIG_RD_LZMA=y | 27 | CONFIG_RD_LZMA=y |
28 | CONFIG_INITRAMFS_COMPRESSION_LZMA=y | 28 | CONFIG_INITRAMFS_COMPRESSION_LZMA=y |
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index 7c68666fdd64..d0b857d98c91 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig | |||
@@ -8,7 +8,7 @@ CONFIG_HIGH_RES_TIMERS=y | |||
8 | CONFIG_PREEMPT_VOLUNTARY=y | 8 | CONFIG_PREEMPT_VOLUNTARY=y |
9 | CONFIG_KEXEC=y | 9 | CONFIG_KEXEC=y |
10 | CONFIG_EXPERIMENTAL=y | 10 | CONFIG_EXPERIMENTAL=y |
11 | CONFIG_CROSS_COMPILE="mips-linux-gnu-" | 11 | CONFIG_CROSS_COMPILE="" |
12 | # CONFIG_LOCALVERSION_AUTO is not set | 12 | # CONFIG_LOCALVERSION_AUTO is not set |
13 | CONFIG_SYSVIPC=y | 13 | CONFIG_SYSVIPC=y |
14 | CONFIG_POSIX_MQUEUE=y | 14 | CONFIG_POSIX_MQUEUE=y |
@@ -22,7 +22,7 @@ CONFIG_AUDIT=y | |||
22 | CONFIG_NAMESPACES=y | 22 | CONFIG_NAMESPACES=y |
23 | CONFIG_SCHED_AUTOGROUP=y | 23 | CONFIG_SCHED_AUTOGROUP=y |
24 | CONFIG_BLK_DEV_INITRD=y | 24 | CONFIG_BLK_DEV_INITRD=y |
25 | CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs.xlr" | 25 | CONFIG_INITRAMFS_SOURCE="" |
26 | CONFIG_RD_BZIP2=y | 26 | CONFIG_RD_BZIP2=y |
27 | CONFIG_RD_LZMA=y | 27 | CONFIG_RD_LZMA=y |
28 | CONFIG_INITRAMFS_COMPRESSION_GZIP=y | 28 | CONFIG_INITRAMFS_COMPRESSION_GZIP=y |
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig index 3b0b6e8c8533..7fda0ce5f692 100644 --- a/arch/mips/configs/powertv_defconfig +++ b/arch/mips/configs/powertv_defconfig | |||
@@ -6,7 +6,7 @@ CONFIG_HZ_1000=y | |||
6 | CONFIG_PREEMPT=y | 6 | CONFIG_PREEMPT=y |
7 | # CONFIG_SECCOMP is not set | 7 | # CONFIG_SECCOMP is not set |
8 | CONFIG_EXPERIMENTAL=y | 8 | CONFIG_EXPERIMENTAL=y |
9 | CONFIG_CROSS_COMPILE="mips-linux-" | 9 | CONFIG_CROSS_COMPILE="" |
10 | # CONFIG_SWAP is not set | 10 | # CONFIG_SWAP is not set |
11 | CONFIG_SYSVIPC=y | 11 | CONFIG_SYSVIPC=y |
12 | CONFIG_LOG_BUF_SHIFT=16 | 12 | CONFIG_LOG_BUF_SHIFT=16 |
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h index 556e1be20bf6..fb9975c74c57 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h | |||
@@ -11,6 +11,9 @@ | |||
11 | #include <asm/io.h> | 11 | #include <asm/io.h> |
12 | #include <asm/mach-au1x00/au1000.h> | 12 | #include <asm/mach-au1x00/au1000.h> |
13 | 13 | ||
14 | struct gpio; | ||
15 | struct gpio_chip; | ||
16 | |||
14 | /* with the current GPIC design, up to 128 GPIOs are possible. | 17 | /* with the current GPIC design, up to 128 GPIOs are possible. |
15 | * The only implementation so far is in the Au1300, which has 75 externally | 18 | * The only implementation so far is in the Au1300, which has 75 externally |
16 | * available GPIOs. | 19 | * available GPIOs. |
@@ -203,7 +206,22 @@ static inline int gpio_request(unsigned int gpio, const char *label) | |||
203 | return 0; | 206 | return 0; |
204 | } | 207 | } |
205 | 208 | ||
206 | static inline void gpio_free(unsigned int gpio) | 209 | static inline int gpio_request_one(unsigned gpio, |
210 | unsigned long flags, const char *label) | ||
211 | { | ||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static inline int gpio_request_array(struct gpio *array, size_t num) | ||
216 | { | ||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static inline void gpio_free(unsigned gpio) | ||
221 | { | ||
222 | } | ||
223 | |||
224 | static inline void gpio_free_array(struct gpio *array, size_t num) | ||
207 | { | 225 | { |
208 | } | 226 | } |
209 | 227 | ||
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index d41790928c64..da9bd7d270d1 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
@@ -39,9 +39,6 @@ | |||
39 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | 39 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
40 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 40 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
41 | #else /* !CONFIG_HUGETLB_PAGE */ | 41 | #else /* !CONFIG_HUGETLB_PAGE */ |
42 | # ifndef BUILD_BUG | ||
43 | # define BUILD_BUG() do { extern void __build_bug(void); __build_bug(); } while (0) | ||
44 | # endif | ||
45 | #define HPAGE_SHIFT ({BUILD_BUG(); 0; }) | 42 | #define HPAGE_SHIFT ({BUILD_BUG(); 0; }) |
46 | #define HPAGE_SIZE ({BUILD_BUG(); 0; }) | 43 | #define HPAGE_SIZE ({BUILD_BUG(); 0; }) |
47 | #define HPAGE_MASK ({BUILD_BUG(); 0; }) | 44 | #define HPAGE_MASK ({BUILD_BUG(); 0; }) |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 58fe71afd879..d5e950ab8527 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * SMP support for BMIPS | 8 | * SMP support for BMIPS |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/version.h> | ||
12 | #include <linux/init.h> | 11 | #include <linux/init.h> |
13 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
14 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index cc4a3f120f54..d79ae5437b58 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -1135,7 +1135,7 @@ asmlinkage void do_mt(struct pt_regs *regs) | |||
1135 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); | 1135 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); |
1136 | break; | 1136 | break; |
1137 | case 5: | 1137 | case 5: |
1138 | printk(KERN_DEBUG "Gating Storage Schedulier Exception\n"); | 1138 | printk(KERN_DEBUG "Gating Storage Scheduler Exception\n"); |
1139 | break; | 1139 | break; |
1140 | default: | 1140 | default: |
1141 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", | 1141 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index a81176f44c74..924da5eb7031 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -69,7 +69,6 @@ SECTIONS | |||
69 | RODATA | 69 | RODATA |
70 | 70 | ||
71 | /* writeable */ | 71 | /* writeable */ |
72 | _sdata = .; /* Start of data section */ | ||
73 | .data : { /* Data */ | 72 | .data : { /* Data */ |
74 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ | 73 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ |
75 | 74 | ||
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 937cf3368164..69ebd586d7ff 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -42,6 +42,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ | |||
42 | const int field = sizeof(unsigned long) * 2; | 42 | const int field = sizeof(unsigned long) * 2; |
43 | siginfo_t info; | 43 | siginfo_t info; |
44 | int fault; | 44 | int fault; |
45 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
46 | (write ? FAULT_FLAG_WRITE : 0); | ||
45 | 47 | ||
46 | #if 0 | 48 | #if 0 |
47 | printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), | 49 | printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), |
@@ -91,6 +93,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ | |||
91 | if (in_atomic() || !mm) | 93 | if (in_atomic() || !mm) |
92 | goto bad_area_nosemaphore; | 94 | goto bad_area_nosemaphore; |
93 | 95 | ||
96 | retry: | ||
94 | down_read(&mm->mmap_sem); | 97 | down_read(&mm->mmap_sem); |
95 | vma = find_vma(mm, address); | 98 | vma = find_vma(mm, address); |
96 | if (!vma) | 99 | if (!vma) |
@@ -144,7 +147,11 @@ good_area: | |||
144 | * make sure we exit gracefully rather than endlessly redo | 147 | * make sure we exit gracefully rather than endlessly redo |
145 | * the fault. | 148 | * the fault. |
146 | */ | 149 | */ |
147 | fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); | 150 | fault = handle_mm_fault(mm, vma, address, flags); |
151 | |||
152 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
153 | return; | ||
154 | |||
148 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | 155 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
149 | if (unlikely(fault & VM_FAULT_ERROR)) { | 156 | if (unlikely(fault & VM_FAULT_ERROR)) { |
150 | if (fault & VM_FAULT_OOM) | 157 | if (fault & VM_FAULT_OOM) |
@@ -153,12 +160,27 @@ good_area: | |||
153 | goto do_sigbus; | 160 | goto do_sigbus; |
154 | BUG(); | 161 | BUG(); |
155 | } | 162 | } |
156 | if (fault & VM_FAULT_MAJOR) { | 163 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
157 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | 164 | if (fault & VM_FAULT_MAJOR) { |
158 | tsk->maj_flt++; | 165 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
159 | } else { | 166 | regs, address); |
160 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | 167 | tsk->maj_flt++; |
161 | tsk->min_flt++; | 168 | } else { |
169 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
170 | regs, address); | ||
171 | tsk->min_flt++; | ||
172 | } | ||
173 | if (fault & VM_FAULT_RETRY) { | ||
174 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
175 | |||
176 | /* | ||
177 | * No need to up_read(&mm->mmap_sem) as we would | ||
178 | * have already released it in __lock_page_or_retry | ||
179 | * in mm/filemap.c. | ||
180 | */ | ||
181 | |||
182 | goto retry; | ||
183 | } | ||
162 | } | 184 | } |
163 | 185 | ||
164 | up_read(&mm->mmap_sem); | 186 | up_read(&mm->mmap_sem); |
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index aec2b111d35b..15521505ebe8 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c | |||
@@ -279,7 +279,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) | |||
279 | { | 279 | { |
280 | /* Propagate hose info into the subordinate devices. */ | 280 | /* Propagate hose info into the subordinate devices. */ |
281 | 281 | ||
282 | struct list_head *ln; | ||
283 | struct pci_dev *dev = bus->self; | 282 | struct pci_dev *dev = bus->self; |
284 | 283 | ||
285 | if (pci_probe_only && dev && | 284 | if (pci_probe_only && dev && |
@@ -288,9 +287,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) | |||
288 | pcibios_fixup_device_resources(dev, bus); | 287 | pcibios_fixup_device_resources(dev, bus); |
289 | } | 288 | } |
290 | 289 | ||
291 | for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) { | 290 | list_for_each_entry(dev, &bus->devices, bus_list) { |
292 | dev = pci_dev_b(ln); | ||
293 | |||
294 | if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) | 291 | if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) |
295 | pcibios_fixup_device_resources(dev, bus); | 292 | pcibios_fixup_device_resources(dev, bus); |
296 | } | 293 | } |
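An aside for readers following the pci.c hunk above: the patch replaces an open-coded walk of bus->devices (pci_dev_b() on each list node) with the kernel's list_for_each_entry() iterator. The sketch below re-creates that idiom in user space with a minimal intrusive list; it is illustrative only — the real macros live in <linux/list.h>, and struct pci_dev_stub is a made-up stand-in for struct pci_dev.

/* Sketch (not part of the patch): the list_for_each_entry() idiom that
 * the pci.c hunk above switches to, re-created with a minimal intrusive
 * list.  Everything here is a user-space stand-in for illustration. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct pci_dev_stub {			/* stand-in for struct pci_dev */
	unsigned int class;
	struct list_head bus_list;
};

int main(void)
{
	struct list_head devices = { &devices, &devices };
	struct pci_dev_stub a = { 0x060400, { NULL, NULL } };	/* PCI-PCI bridge */
	struct pci_dev_stub b = { 0x020000, { NULL, NULL } };	/* ethernet device */
	struct pci_dev_stub *dev;

	/* link a and b onto the bus's circular device list */
	a.bus_list.next = &b.bus_list;  a.bus_list.prev = &devices;
	b.bus_list.next = &devices;     b.bus_list.prev = &a.bus_list;
	devices.next = &a.bus_list;     devices.prev = &b.bus_list;

	list_for_each_entry(dev, &devices, bus_list)
		if ((dev->class >> 8) != 0x0604)	/* skip PCI-PCI bridges */
			printf("fixing up device with class %06x\n", dev->class);

	return 0;
}

The point of the idiom is that the cursor variable is typed as the containing structure, so the loop body never has to convert a struct list_head pointer back to the device by hand.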
diff --git a/arch/mips/pmc-sierra/yosemite/ht-irq.c b/arch/mips/pmc-sierra/yosemite/ht-irq.c index 86b98e98fb4f..62ead6601c69 100644 --- a/arch/mips/pmc-sierra/yosemite/ht-irq.c +++ b/arch/mips/pmc-sierra/yosemite/ht-irq.c | |||
@@ -35,16 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus) | 36 | void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus) |
37 | { | 37 | { |
38 | struct pci_bus *current_bus = bus; | ||
39 | struct pci_dev *devices; | ||
40 | struct list_head *devices_link; | ||
41 | |||
42 | list_for_each(devices_link, &(current_bus->devices)) { | ||
43 | devices = pci_dev_b(devices_link); | ||
44 | if (devices == NULL) | ||
45 | continue; | ||
46 | } | ||
47 | |||
48 | /* | 38 | /* |
49 | * PLX and SPKT related changes go here | 39 | * PLX and SPKT related changes go here |
50 | */ | 40 | */ |
diff --git a/arch/mips/txx9/generic/7segled.c b/arch/mips/txx9/generic/7segled.c index 8e93b2122524..4642f56e70e5 100644 --- a/arch/mips/txx9/generic/7segled.c +++ b/arch/mips/txx9/generic/7segled.c | |||
@@ -102,7 +102,7 @@ static int __init tx_7segled_init_sysfs(void) | |||
102 | break; | 102 | break; |
103 | } | 103 | } |
104 | dev->id = i; | 104 | dev->id = i; |
105 | dev->dev = &tx_7segled_subsys; | 105 | dev->bus = &tx_7segled_subsys; |
106 | error = device_register(dev); | 106 | error = device_register(dev); |
107 | if (!error) { | 107 | if (!error) { |
108 | device_create_file(dev, &dev_attr_ascii); | 108 | device_create_file(dev, &dev_attr_ascii); |
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h index 054537c5f9c9..e612ce4512c7 100644 --- a/arch/openrisc/include/asm/ptrace.h +++ b/arch/openrisc/include/asm/ptrace.h | |||
@@ -77,7 +77,6 @@ struct pt_regs { | |||
77 | long syscallno; /* Syscall number (used by strace) */ | 77 | long syscallno; /* Syscall number (used by strace) */ |
78 | long dummy; /* Cheap alignment fix */ | 78 | long dummy; /* Cheap alignment fix */ |
79 | }; | 79 | }; |
80 | #endif /* __ASSEMBLY__ */ | ||
81 | 80 | ||
82 | /* TODO: Rename this to REDZONE because that's what it is */ | 81 | /* TODO: Rename this to REDZONE because that's what it is */ |
83 | #define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */ | 82 | #define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */ |
@@ -87,6 +86,13 @@ struct pt_regs { | |||
87 | #define user_stack_pointer(regs) ((unsigned long)(regs)->sp) | 86 | #define user_stack_pointer(regs) ((unsigned long)(regs)->sp) |
88 | #define profile_pc(regs) instruction_pointer(regs) | 87 | #define profile_pc(regs) instruction_pointer(regs) |
89 | 88 | ||
89 | static inline long regs_return_value(struct pt_regs *regs) | ||
90 | { | ||
91 | return regs->gpr[11]; | ||
92 | } | ||
93 | |||
94 | #endif /* __ASSEMBLY__ */ | ||
95 | |||
90 | /* | 96 | /* |
91 | * Offsets used by 'ptrace' system call interface. | 97 | * Offsets used by 'ptrace' system call interface. |
92 | */ | 98 | */ |
diff --git a/arch/openrisc/kernel/init_task.c b/arch/openrisc/kernel/init_task.c index 45744a384927..ca534082d5f3 100644 --- a/arch/openrisc/kernel/init_task.c +++ b/arch/openrisc/kernel/init_task.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/init_task.h> | 18 | #include <linux/init_task.h> |
19 | #include <linux/mqueue.h> | 19 | #include <linux/mqueue.h> |
20 | #include <linux/export.h> | ||
20 | 21 | ||
21 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | 22 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); |
22 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | 23 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); |
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c index 59b302338331..4bfead220956 100644 --- a/arch/openrisc/kernel/irq.c +++ b/arch/openrisc/kernel/irq.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/kernel_stat.h> | 25 | #include <linux/kernel_stat.h> |
26 | #include <linux/export.h> | ||
26 | 27 | ||
27 | #include <linux/irqflags.h> | 28 | #include <linux/irqflags.h> |
28 | 29 | ||
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 656b94beab89..7259047d5f9d 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c | |||
@@ -188,11 +188,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
188 | */ | 188 | */ |
189 | ret = -1L; | 189 | ret = -1L; |
190 | 190 | ||
191 | /* Are these regs right??? */ | 191 | audit_syscall_entry(audit_arch(), regs->syscallno, |
192 | if (unlikely(current->audit_context)) | 192 | regs->gpr[3], regs->gpr[4], |
193 | audit_syscall_entry(audit_arch(), regs->syscallno, | 193 | regs->gpr[5], regs->gpr[6]); |
194 | regs->gpr[3], regs->gpr[4], | ||
195 | regs->gpr[5], regs->gpr[6]); | ||
196 | 194 | ||
197 | return ret ? : regs->syscallno; | 195 | return ret ? : regs->syscallno; |
198 | } | 196 | } |
@@ -201,9 +199,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) | |||
201 | { | 199 | { |
202 | int step; | 200 | int step; |
203 | 201 | ||
204 | if (unlikely(current->audit_context)) | 202 | audit_syscall_exit(regs); |
205 | audit_syscall_exit(AUDITSC_RESULT(regs->gpr[11]), | ||
206 | regs->gpr[11]); | ||
207 | 203 | ||
208 | step = test_thread_flag(TIF_SINGLESTEP); | 204 | step = test_thread_flag(TIF_SINGLESTEP); |
209 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 205 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 55cca1dac431..19ab7b2ea1cd 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -31,7 +31,11 @@ ifdef CONFIG_64BIT | |||
31 | UTS_MACHINE := parisc64 | 31 | UTS_MACHINE := parisc64 |
32 | CHECKFLAGS += -D__LP64__=1 -m64 | 32 | CHECKFLAGS += -D__LP64__=1 -m64 |
33 | WIDTH := 64 | 33 | WIDTH := 64 |
34 | |||
35 | # FIXME: if no default set, should really try to locate dynamically | ||
36 | ifeq ($(CROSS_COMPILE),) | ||
34 | CROSS_COMPILE := hppa64-linux-gnu- | 37 | CROSS_COMPILE := hppa64-linux-gnu- |
38 | endif | ||
35 | else # 32-bit | 39 | else # 32-bit |
36 | WIDTH := | 40 | WIDTH := |
37 | endif | 41 | endif |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index d1727584230a..6d99a5fcc090 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -227,6 +227,9 @@ config COMPAT | |||
227 | config SYSVIPC_COMPAT | 227 | config SYSVIPC_COMPAT |
228 | def_bool y if COMPAT && SYSVIPC | 228 | def_bool y if COMPAT && SYSVIPC |
229 | 229 | ||
230 | config KEYS_COMPAT | ||
231 | def_bool y if COMPAT && KEYS | ||
232 | |||
230 | config AUDIT_ARCH | 233 | config AUDIT_ARCH |
231 | def_bool y | 234 | def_bool y |
232 | 235 | ||
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 39f8fd4438fc..c383ce440d99 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/gfp.h> | 12 | #include <linux/gfp.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/crash_dump.h> | ||
15 | #include <linux/bootmem.h> | 14 | #include <linux/bootmem.h> |
16 | #include <linux/elf.h> | 15 | #include <linux/elf.h> |
17 | #include <asm/ipl.h> | 16 | #include <asm/ipl.h> |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 5d633019d8f3..50236610de83 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -223,16 +223,38 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
223 | #ifdef CONFIG_MEMORY_HOTPLUG | 223 | #ifdef CONFIG_MEMORY_HOTPLUG |
224 | int arch_add_memory(int nid, u64 start, u64 size) | 224 | int arch_add_memory(int nid, u64 start, u64 size) |
225 | { | 225 | { |
226 | struct pglist_data *pgdat; | 226 | unsigned long zone_start_pfn, zone_end_pfn, nr_pages; |
227 | unsigned long start_pfn = PFN_DOWN(start); | ||
228 | unsigned long size_pages = PFN_DOWN(size); | ||
227 | struct zone *zone; | 229 | struct zone *zone; |
228 | int rc; | 230 | int rc; |
229 | 231 | ||
230 | pgdat = NODE_DATA(nid); | ||
231 | zone = pgdat->node_zones + ZONE_MOVABLE; | ||
232 | rc = vmem_add_mapping(start, size); | 232 | rc = vmem_add_mapping(start, size); |
233 | if (rc) | 233 | if (rc) |
234 | return rc; | 234 | return rc; |
235 | rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size)); | 235 | for_each_zone(zone) { |
236 | if (zone_idx(zone) != ZONE_MOVABLE) { | ||
237 | /* Add range within existing zone limits */ | ||
238 | zone_start_pfn = zone->zone_start_pfn; | ||
239 | zone_end_pfn = zone->zone_start_pfn + | ||
240 | zone->spanned_pages; | ||
241 | } else { | ||
242 | /* Add remaining range to ZONE_MOVABLE */ | ||
243 | zone_start_pfn = start_pfn; | ||
244 | zone_end_pfn = start_pfn + size_pages; | ||
245 | } | ||
246 | if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn) | ||
247 | continue; | ||
248 | nr_pages = (start_pfn + size_pages > zone_end_pfn) ? | ||
249 | zone_end_pfn - start_pfn : size_pages; | ||
250 | rc = __add_pages(nid, zone, start_pfn, nr_pages); | ||
251 | if (rc) | ||
252 | break; | ||
253 | start_pfn += nr_pages; | ||
254 | size_pages -= nr_pages; | ||
255 | if (!size_pages) | ||
256 | break; | ||
257 | } | ||
236 | if (rc) | 258 | if (rc) |
237 | vmem_remove_mapping(start, size); | 259 | vmem_remove_mapping(start, size); |
238 | return rc; | 260 | return rc; |
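For the s390 arch_add_memory() rework above, the interesting step is how a hot-added pfn range is split between the existing zones and ZONE_MOVABLE. A stand-alone sketch of that splitting loop follows; it is not taken from the patch, and the zone limits and pfn values in main() are invented for the example.

/* Stand-alone sketch (not from the patch) of the range-splitting step in
 * the new s390 arch_add_memory(): each chunk of the hot-added pfn range
 * that overlaps an existing zone goes to that zone, and whatever is left
 * goes to the movable zone.  Zone limits below are invented. */
#include <stdio.h>

struct zone_range {
	const char *name;
	unsigned long start, end;	/* pfn range [start, end) */
};

int main(void)
{
	struct zone_range zones[] = {
		{ "DMA",     0x00000, 0x80000 },
		{ "Normal",  0x80000, 0xff000 },
		{ "Movable", 0,       0       },	/* takes the remainder */
	};
	unsigned long start_pfn = 0xfe800, size_pages = 0x1000, nr_pages;
	unsigned int i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]) && size_pages; i++) {
		unsigned long zstart = zones[i].start, zend = zones[i].end;

		if (zend == 0) {		/* movable: the remaining range */
			zstart = start_pfn;
			zend = start_pfn + size_pages;
		}
		if (start_pfn < zstart || start_pfn >= zend)
			continue;
		nr_pages = (start_pfn + size_pages > zend) ?
			   zend - start_pfn : size_pages;
		printf("add %#lx pages at pfn %#lx to %s\n",
		       nr_pages, start_pfn, zones[i].name);
		start_pfn += nr_pages;
		size_pages -= nr_pages;
	}
	return 0;
}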
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index fd843877e841..39e49091f648 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -315,6 +315,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
315 | current->mm->free_area_cache = TASK_UNMAPPED_BASE; | 315 | current->mm->free_area_cache = TASK_UNMAPPED_BASE; |
316 | current->mm->cached_hole_size = 0; | 316 | current->mm->cached_hole_size = 0; |
317 | 317 | ||
318 | retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); | ||
319 | if (retval < 0) { | ||
320 | /* Someone check-me: is this error path enough? */ | ||
321 | send_sig(SIGKILL, current, 0); | ||
322 | return retval; | ||
323 | } | ||
324 | |||
318 | install_exec_creds(bprm); | 325 | install_exec_creds(bprm); |
319 | current->flags &= ~PF_FORKNOEXEC; | 326 | current->flags &= ~PF_FORKNOEXEC; |
320 | 327 | ||
@@ -410,13 +417,6 @@ beyond_if: | |||
410 | 417 | ||
411 | set_brk(current->mm->start_brk, current->mm->brk); | 418 | set_brk(current->mm->start_brk, current->mm->brk); |
412 | 419 | ||
413 | retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); | ||
414 | if (retval < 0) { | ||
415 | /* Someone check-me: is this error path enough? */ | ||
416 | send_sig(SIGKILL, current, 0); | ||
417 | return retval; | ||
418 | } | ||
419 | |||
420 | current->mm->start_stack = | 420 | current->mm->start_stack = |
421 | (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); | 421 | (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); |
422 | /* start thread */ | 422 | /* start thread */ |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5369059c07a9..532d2e090e6f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | |||
69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
71 | obj-$(CONFIG_KPROBES) += kprobes.o | 71 | obj-$(CONFIG_KPROBES) += kprobes.o |
72 | obj-$(CONFIG_OPTPROBES) += kprobes-opt.o | ||
72 | obj-$(CONFIG_MODULES) += module.o | 73 | obj-$(CONFIG_MODULES) += module.o |
73 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | 74 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o |
74 | obj-$(CONFIG_KGDB) += kgdb.o | 75 | obj-$(CONFIG_KGDB) += kgdb.o |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 74387c12dc72..8484e77c211e 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -271,6 +271,29 @@ struct x86_pmu_quirk { | |||
271 | void (*func)(void); | 271 | void (*func)(void); |
272 | }; | 272 | }; |
273 | 273 | ||
274 | union x86_pmu_config { | ||
275 | struct { | ||
276 | u64 event:8, | ||
277 | umask:8, | ||
278 | usr:1, | ||
279 | os:1, | ||
280 | edge:1, | ||
281 | pc:1, | ||
282 | interrupt:1, | ||
283 | __reserved1:1, | ||
284 | en:1, | ||
285 | inv:1, | ||
286 | cmask:8, | ||
287 | event2:4, | ||
288 | __reserved2:4, | ||
289 | go:1, | ||
290 | ho:1; | ||
291 | } bits; | ||
292 | u64 value; | ||
293 | }; | ||
294 | |||
295 | #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value | ||
296 | |||
274 | /* | 297 | /* |
275 | * struct x86_pmu - generic x86 pmu | 298 | * struct x86_pmu - generic x86 pmu |
276 | */ | 299 | */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 6627089232a7..6a84e7f28f05 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -385,14 +385,15 @@ static __initconst const u64 westmere_hw_cache_event_ids | |||
385 | #define NHM_LOCAL_DRAM (1 << 14) | 385 | #define NHM_LOCAL_DRAM (1 << 14) |
386 | #define NHM_NON_DRAM (1 << 15) | 386 | #define NHM_NON_DRAM (1 << 15) |
387 | 387 | ||
388 | #define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM) | 388 | #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD) |
389 | #define NHM_REMOTE (NHM_REMOTE_DRAM) | ||
389 | 390 | ||
390 | #define NHM_DMND_READ (NHM_DMND_DATA_RD) | 391 | #define NHM_DMND_READ (NHM_DMND_DATA_RD) |
391 | #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) | 392 | #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) |
392 | #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) | 393 | #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) |
393 | 394 | ||
394 | #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) | 395 | #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) |
395 | #define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD) | 396 | #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD) |
396 | #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) | 397 | #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) |
397 | 398 | ||
398 | static __initconst const u64 nehalem_hw_cache_extra_regs | 399 | static __initconst const u64 nehalem_hw_cache_extra_regs |
@@ -416,16 +417,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs | |||
416 | }, | 417 | }, |
417 | [ C(NODE) ] = { | 418 | [ C(NODE) ] = { |
418 | [ C(OP_READ) ] = { | 419 | [ C(OP_READ) ] = { |
419 | [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM, | 420 | [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE, |
420 | [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM, | 421 | [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE, |
421 | }, | 422 | }, |
422 | [ C(OP_WRITE) ] = { | 423 | [ C(OP_WRITE) ] = { |
423 | [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM, | 424 | [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE, |
424 | [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM, | 425 | [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE, |
425 | }, | 426 | }, |
426 | [ C(OP_PREFETCH) ] = { | 427 | [ C(OP_PREFETCH) ] = { |
427 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM, | 428 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE, |
428 | [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM, | 429 | [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE, |
429 | }, | 430 | }, |
430 | }, | 431 | }, |
431 | }; | 432 | }; |
@@ -1328,7 +1329,8 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
1328 | * | 1329 | * |
1329 | * Thereby we gain a PEBS capable cycle counter. | 1330 | * Thereby we gain a PEBS capable cycle counter. |
1330 | */ | 1331 | */ |
1331 | u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */ | 1332 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); |
1333 | |||
1332 | 1334 | ||
1333 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 1335 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); |
1334 | event->hw.config = alt_config; | 1336 | event->hw.config = alt_config; |
@@ -1755,9 +1757,11 @@ __init int intel_pmu_init(void) | |||
1755 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | 1757 | x86_pmu.extra_regs = intel_nehalem_extra_regs; |
1756 | 1758 | ||
1757 | /* UOPS_ISSUED.STALLED_CYCLES */ | 1759 | /* UOPS_ISSUED.STALLED_CYCLES */ |
1758 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1760 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = |
1761 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | ||
1759 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 1762 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ |
1760 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | 1763 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = |
1764 | X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); | ||
1761 | 1765 | ||
1762 | x86_add_quirk(intel_nehalem_quirk); | 1766 | x86_add_quirk(intel_nehalem_quirk); |
1763 | 1767 | ||
@@ -1792,9 +1796,11 @@ __init int intel_pmu_init(void) | |||
1792 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 1796 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
1793 | 1797 | ||
1794 | /* UOPS_ISSUED.STALLED_CYCLES */ | 1798 | /* UOPS_ISSUED.STALLED_CYCLES */ |
1795 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1799 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = |
1800 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | ||
1796 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 1801 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ |
1797 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | 1802 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = |
1803 | X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); | ||
1798 | 1804 | ||
1799 | pr_cont("Westmere events, "); | 1805 | pr_cont("Westmere events, "); |
1800 | break; | 1806 | break; |
@@ -1815,9 +1821,11 @@ __init int intel_pmu_init(void) | |||
1815 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | 1821 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; |
1816 | 1822 | ||
1817 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ | 1823 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ |
1818 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1824 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = |
1825 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | ||
1819 | /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ | 1826 | /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ |
1820 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; | 1827 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = |
1828 | X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); | ||
1821 | 1829 | ||
1822 | pr_cont("SandyBridge events, "); | 1830 | pr_cont("SandyBridge events, "); |
1823 | break; | 1831 | break; |
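The hunks above replace hand-rolled raw event constants with X86_CONFIG() initialisers. As a rough illustration of why the two spellings are equivalent, the sketch below encodes the same fields by hand using the architectural PERFEVTSEL bit positions (event select in bits 0-7, unit mask in bits 8-15, invert in bit 23, counter mask in bits 24-31); the helper name pmu_config() is made up for the example and is not the kernel macro.

#include <stdio.h>
#include <stdint.h>

/* Illustration only: encode a PERFEVTSEL-style raw config by hand. */
static uint64_t pmu_config(uint64_t event, uint64_t umask,
			   uint64_t inv, uint64_t cmask)
{
	return event | (umask << 8) | (inv << 23) | (cmask << 24);
}

int main(void)
{
	/* Prints 0x180010e, the literal this patch replaces for
	 * UOPS_ISSUED.STALLED_CYCLES. */
	printf("%#llx\n", (unsigned long long)pmu_config(0x0e, 0x01, 1, 1));

	/* Prints 0x108000c0, the literal replaced for
	 * INST_RETIRED.TOTAL_CYCLES (cmask = 16, inverted). */
	printf("%#llx\n", (unsigned long long)pmu_config(0xc0, 0x00, 1, 16));
	return 0;
}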
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h new file mode 100644 index 000000000000..3230b68ef29a --- /dev/null +++ b/arch/x86/kernel/kprobes-common.h | |||
@@ -0,0 +1,102 @@ | |||
1 | #ifndef __X86_KERNEL_KPROBES_COMMON_H | ||
2 | #define __X86_KERNEL_KPROBES_COMMON_H | ||
3 | |||
4 | /* Kprobes and Optprobes common header */ | ||
5 | |||
6 | #ifdef CONFIG_X86_64 | ||
7 | #define SAVE_REGS_STRING \ | ||
8 | /* Skip cs, ip, orig_ax. */ \ | ||
9 | " subq $24, %rsp\n" \ | ||
10 | " pushq %rdi\n" \ | ||
11 | " pushq %rsi\n" \ | ||
12 | " pushq %rdx\n" \ | ||
13 | " pushq %rcx\n" \ | ||
14 | " pushq %rax\n" \ | ||
15 | " pushq %r8\n" \ | ||
16 | " pushq %r9\n" \ | ||
17 | " pushq %r10\n" \ | ||
18 | " pushq %r11\n" \ | ||
19 | " pushq %rbx\n" \ | ||
20 | " pushq %rbp\n" \ | ||
21 | " pushq %r12\n" \ | ||
22 | " pushq %r13\n" \ | ||
23 | " pushq %r14\n" \ | ||
24 | " pushq %r15\n" | ||
25 | #define RESTORE_REGS_STRING \ | ||
26 | " popq %r15\n" \ | ||
27 | " popq %r14\n" \ | ||
28 | " popq %r13\n" \ | ||
29 | " popq %r12\n" \ | ||
30 | " popq %rbp\n" \ | ||
31 | " popq %rbx\n" \ | ||
32 | " popq %r11\n" \ | ||
33 | " popq %r10\n" \ | ||
34 | " popq %r9\n" \ | ||
35 | " popq %r8\n" \ | ||
36 | " popq %rax\n" \ | ||
37 | " popq %rcx\n" \ | ||
38 | " popq %rdx\n" \ | ||
39 | " popq %rsi\n" \ | ||
40 | " popq %rdi\n" \ | ||
41 | /* Skip orig_ax, ip, cs */ \ | ||
42 | " addq $24, %rsp\n" | ||
43 | #else | ||
44 | #define SAVE_REGS_STRING \ | ||
45 | /* Skip cs, ip, orig_ax and gs. */ \ | ||
46 | " subl $16, %esp\n" \ | ||
47 | " pushl %fs\n" \ | ||
48 | " pushl %es\n" \ | ||
49 | " pushl %ds\n" \ | ||
50 | " pushl %eax\n" \ | ||
51 | " pushl %ebp\n" \ | ||
52 | " pushl %edi\n" \ | ||
53 | " pushl %esi\n" \ | ||
54 | " pushl %edx\n" \ | ||
55 | " pushl %ecx\n" \ | ||
56 | " pushl %ebx\n" | ||
57 | #define RESTORE_REGS_STRING \ | ||
58 | " popl %ebx\n" \ | ||
59 | " popl %ecx\n" \ | ||
60 | " popl %edx\n" \ | ||
61 | " popl %esi\n" \ | ||
62 | " popl %edi\n" \ | ||
63 | " popl %ebp\n" \ | ||
64 | " popl %eax\n" \ | ||
65 | /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ | ||
66 | " addl $24, %esp\n" | ||
67 | #endif | ||
68 | |||
69 | /* Check whether the instruction can be boosted */ | ||
70 | extern int can_boost(kprobe_opcode_t *instruction); | ||
71 | /* Recover instruction if given address is probed */ | ||
72 | extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, | ||
73 | unsigned long addr); | ||
74 | /* | ||
75 | * Copy an instruction and adjust the displacement if the instruction | ||
76 | * uses the %rip-relative addressing mode. | ||
77 | */ | ||
78 | extern int __copy_instruction(u8 *dest, u8 *src); | ||
79 | |||
80 | /* Generate a relative-jump/call instruction */ | ||
81 | extern void synthesize_reljump(void *from, void *to); | ||
82 | extern void synthesize_relcall(void *from, void *to); | ||
83 | |||
84 | #ifdef CONFIG_OPTPROBES | ||
85 | extern int arch_init_optprobes(void); | ||
86 | extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); | ||
87 | extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); | ||
88 | #else /* !CONFIG_OPTPROBES */ | ||
89 | static inline int arch_init_optprobes(void) | ||
90 | { | ||
91 | return 0; | ||
92 | } | ||
93 | static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) | ||
94 | { | ||
95 | return 0; | ||
96 | } | ||
97 | static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) | ||
98 | { | ||
99 | return addr; | ||
100 | } | ||
101 | #endif | ||
102 | #endif | ||
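The !CONFIG_OPTPROBES stubs above let the shared kprobes code call the optprobe hooks unconditionally and let the compiler drop the dead branches. A minimal sketch of such a caller (the function name example_singlestep is hypothetical; the actual caller in this patch is setup_singlestep() in kprobes.c):

#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include "kprobes-common.h"

/* Sketch only: rely on the stub returning 0 when optprobes are disabled. */
static void example_singlestep(struct kprobe *p, struct pt_regs *regs,
			       int reenter)
{
	/*
	 * With CONFIG_OPTPROBES=n this is a constant 0 and the branch
	 * disappears; with it enabled, the detour takes over and no
	 * single-stepping is needed.
	 */
	if (setup_detour_execution(p, regs, reenter))
		return;

	/* ... otherwise fall back to ordinary single-stepping ... */
}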
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c new file mode 100644 index 000000000000..c5e410eed403 --- /dev/null +++ b/arch/x86/kernel/kprobes-opt.c | |||
@@ -0,0 +1,512 @@ | |||
1 | /* | ||
2 | * Kernel Probes Jump Optimization (Optprobes) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
19 | * Copyright (C) Hitachi Ltd., 2012 | ||
20 | */ | ||
21 | #include <linux/kprobes.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/preempt.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/kdebug.h> | ||
29 | #include <linux/kallsyms.h> | ||
30 | #include <linux/ftrace.h> | ||
31 | |||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/desc.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <asm/alternative.h> | ||
37 | #include <asm/insn.h> | ||
38 | #include <asm/debugreg.h> | ||
39 | |||
40 | #include "kprobes-common.h" | ||
41 | |||
42 | unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) | ||
43 | { | ||
44 | struct optimized_kprobe *op; | ||
45 | struct kprobe *kp; | ||
46 | long offs; | ||
47 | int i; | ||
48 | |||
49 | for (i = 0; i < RELATIVEJUMP_SIZE; i++) { | ||
50 | kp = get_kprobe((void *)addr - i); | ||
51 | /* This function only handles jump-optimized kprobe */ | ||
52 | if (kp && kprobe_optimized(kp)) { | ||
53 | op = container_of(kp, struct optimized_kprobe, kp); | ||
54 | /* If op->list is not empty, op is under optimizing */ | ||
55 | if (list_empty(&op->list)) | ||
56 | goto found; | ||
57 | } | ||
58 | } | ||
59 | |||
60 | return addr; | ||
61 | found: | ||
62 | /* | ||
63 | * If the kprobe is optimized, the original bytes have been | ||
64 | * overwritten by the jump destination address, so they must be | ||
65 | * recovered from the op->optinsn.copied_insn buffer. | ||
66 | */ | ||
67 | memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | ||
68 | if (addr == (unsigned long)kp->addr) { | ||
69 | buf[0] = kp->opcode; | ||
70 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
71 | } else { | ||
72 | offs = addr - (unsigned long)kp->addr - 1; | ||
73 | memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); | ||
74 | } | ||
75 | |||
76 | return (unsigned long)buf; | ||
77 | } | ||
78 | |||
79 | /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ | ||
80 | static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) | ||
81 | { | ||
82 | #ifdef CONFIG_X86_64 | ||
83 | *addr++ = 0x48; | ||
84 | *addr++ = 0xbf; | ||
85 | #else | ||
86 | *addr++ = 0xb8; | ||
87 | #endif | ||
88 | *(unsigned long *)addr = val; | ||
89 | } | ||
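On x86-64 the two bytes emitted above (0x48 0xbf) are the REX.W-prefixed "mov imm64, %rdi", and on 32-bit 0xb8 is "mov imm32, %eax", so the optimized_kprobe pointer lands in the first-argument register before the template calls optimized_callback(). A standalone sketch (userspace reimplementation, not kernel code; the sample pointer value is made up) that prints the emitted bytes:

#include <stdio.h>
#include <string.h>

/* Userspace copy of the 64-bit branch of synthesize_set_arg1(). */
static void set_arg1(unsigned char *addr, unsigned long val)
{
	*addr++ = 0x48;				/* REX.W prefix */
	*addr++ = 0xbf;				/* mov imm64, %rdi */
	memcpy(addr, &val, sizeof(val));	/* little-endian immediate */
}

int main(void)
{
	unsigned char buf[2 + sizeof(unsigned long)];
	size_t i;

	/* 0x1234 stands in for the real struct optimized_kprobe pointer. */
	set_arg1(buf, 0x1234UL);
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}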
90 | |||
91 | static void __used __kprobes kprobes_optinsn_template_holder(void) | ||
92 | { | ||
93 | asm volatile ( | ||
94 | ".global optprobe_template_entry\n" | ||
95 | "optprobe_template_entry:\n" | ||
96 | #ifdef CONFIG_X86_64 | ||
97 | /* We don't bother saving the ss register */ | ||
98 | " pushq %rsp\n" | ||
99 | " pushfq\n" | ||
100 | SAVE_REGS_STRING | ||
101 | " movq %rsp, %rsi\n" | ||
102 | ".global optprobe_template_val\n" | ||
103 | "optprobe_template_val:\n" | ||
104 | ASM_NOP5 | ||
105 | ASM_NOP5 | ||
106 | ".global optprobe_template_call\n" | ||
107 | "optprobe_template_call:\n" | ||
108 | ASM_NOP5 | ||
109 | /* Move flags to rsp */ | ||
110 | " movq 144(%rsp), %rdx\n" | ||
111 | " movq %rdx, 152(%rsp)\n" | ||
112 | RESTORE_REGS_STRING | ||
113 | /* Skip flags entry */ | ||
114 | " addq $8, %rsp\n" | ||
115 | " popfq\n" | ||
116 | #else /* CONFIG_X86_32 */ | ||
117 | " pushf\n" | ||
118 | SAVE_REGS_STRING | ||
119 | " movl %esp, %edx\n" | ||
120 | ".global optprobe_template_val\n" | ||
121 | "optprobe_template_val:\n" | ||
122 | ASM_NOP5 | ||
123 | ".global optprobe_template_call\n" | ||
124 | "optprobe_template_call:\n" | ||
125 | ASM_NOP5 | ||
126 | RESTORE_REGS_STRING | ||
127 | " addl $4, %esp\n" /* skip cs */ | ||
128 | " popf\n" | ||
129 | #endif | ||
130 | ".global optprobe_template_end\n" | ||
131 | "optprobe_template_end:\n"); | ||
132 | } | ||
133 | |||
134 | #define TMPL_MOVE_IDX \ | ||
135 | ((long)&optprobe_template_val - (long)&optprobe_template_entry) | ||
136 | #define TMPL_CALL_IDX \ | ||
137 | ((long)&optprobe_template_call - (long)&optprobe_template_entry) | ||
138 | #define TMPL_END_IDX \ | ||
139 | ((long)&optprobe_template_end - (long)&optprobe_template_entry) | ||
140 | |||
141 | #define INT3_SIZE sizeof(kprobe_opcode_t) | ||
142 | |||
143 | /* Optimized kprobe callback function: called from optinsn */ | ||
144 | static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) | ||
145 | { | ||
146 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
147 | unsigned long flags; | ||
148 | |||
149 | /* This is possible if op is under delayed unoptimizing */ | ||
150 | if (kprobe_disabled(&op->kp)) | ||
151 | return; | ||
152 | |||
153 | local_irq_save(flags); | ||
154 | if (kprobe_running()) { | ||
155 | kprobes_inc_nmissed_count(&op->kp); | ||
156 | } else { | ||
157 | /* Save skipped registers */ | ||
158 | #ifdef CONFIG_X86_64 | ||
159 | regs->cs = __KERNEL_CS; | ||
160 | #else | ||
161 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | ||
162 | regs->gs = 0; | ||
163 | #endif | ||
164 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | ||
165 | regs->orig_ax = ~0UL; | ||
166 | |||
167 | __this_cpu_write(current_kprobe, &op->kp); | ||
168 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
169 | opt_pre_handler(&op->kp, regs); | ||
170 | __this_cpu_write(current_kprobe, NULL); | ||
171 | } | ||
172 | local_irq_restore(flags); | ||
173 | } | ||
174 | |||
175 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | ||
176 | { | ||
177 | int len = 0, ret; | ||
178 | |||
179 | while (len < RELATIVEJUMP_SIZE) { | ||
180 | ret = __copy_instruction(dest + len, src + len); | ||
181 | if (!ret || !can_boost(dest + len)) | ||
182 | return -EINVAL; | ||
183 | len += ret; | ||
184 | } | ||
185 | /* Check whether the address range is reserved */ | ||
186 | if (ftrace_text_reserved(src, src + len - 1) || | ||
187 | alternatives_text_reserved(src, src + len - 1) || | ||
188 | jump_label_text_reserved(src, src + len - 1)) | ||
189 | return -EBUSY; | ||
190 | |||
191 | return len; | ||
192 | } | ||
193 | |||
194 | /* Check whether insn is indirect jump */ | ||
195 | static int __kprobes insn_is_indirect_jump(struct insn *insn) | ||
196 | { | ||
197 | return ((insn->opcode.bytes[0] == 0xff && | ||
198 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ | ||
199 | insn->opcode.bytes[0] == 0xea); /* Segment based jump */ | ||
200 | } | ||
201 | |||
202 | /* Check whether insn jumps into specified address range */ | ||
203 | static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) | ||
204 | { | ||
205 | unsigned long target = 0; | ||
206 | |||
207 | switch (insn->opcode.bytes[0]) { | ||
208 | case 0xe0: /* loopne */ | ||
209 | case 0xe1: /* loope */ | ||
210 | case 0xe2: /* loop */ | ||
211 | case 0xe3: /* jcxz */ | ||
212 | case 0xe9: /* near relative jump */ | ||
213 | case 0xeb: /* short relative jump */ | ||
214 | break; | ||
215 | case 0x0f: | ||
216 | if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ | ||
217 | break; | ||
218 | return 0; | ||
219 | default: | ||
220 | if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ | ||
221 | break; | ||
222 | return 0; | ||
223 | } | ||
224 | target = (unsigned long)insn->next_byte + insn->immediate.value; | ||
225 | |||
226 | return (start <= target && target <= start + len); | ||
227 | } | ||
228 | |||
229 | /* Decode the whole function to ensure no instruction jumps into the target */ | ||
230 | static int __kprobes can_optimize(unsigned long paddr) | ||
231 | { | ||
232 | unsigned long addr, size = 0, offset = 0; | ||
233 | struct insn insn; | ||
234 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | ||
235 | |||
236 | /* Lookup symbol including addr */ | ||
237 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) | ||
238 | return 0; | ||
239 | |||
240 | /* | ||
241 | * Do not optimize in the entry code due to the unstable | ||
242 | * stack handling. | ||
243 | */ | ||
244 | if ((paddr >= (unsigned long)__entry_text_start) && | ||
245 | (paddr < (unsigned long)__entry_text_end)) | ||
246 | return 0; | ||
247 | |||
248 | /* Check there is enough space for a relative jump. */ | ||
249 | if (size - offset < RELATIVEJUMP_SIZE) | ||
250 | return 0; | ||
251 | |||
252 | /* Decode instructions */ | ||
253 | addr = paddr - offset; | ||
254 | while (addr < paddr - offset + size) { /* Decode until function end */ | ||
255 | if (search_exception_tables(addr)) | ||
256 | /* | ||
257 | * Since some fixup code jumps into this function, | ||
258 | * we can't optimize a kprobe in this function. | ||
259 | */ | ||
260 | return 0; | ||
261 | kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); | ||
262 | insn_get_length(&insn); | ||
263 | /* Another subsystem puts a breakpoint */ | ||
264 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | ||
265 | return 0; | ||
266 | /* Recover address */ | ||
267 | insn.kaddr = (void *)addr; | ||
268 | insn.next_byte = (void *)(addr + insn.length); | ||
269 | /* Check that the instruction doesn't jump into the target range */ | ||
270 | if (insn_is_indirect_jump(&insn) || | ||
271 | insn_jump_into_range(&insn, paddr + INT3_SIZE, | ||
272 | RELATIVE_ADDR_SIZE)) | ||
273 | return 0; | ||
274 | addr += insn.length; | ||
275 | } | ||
276 | |||
277 | return 1; | ||
278 | } | ||
279 | |||
280 | /* Check optimized_kprobe can actually be optimized. */ | ||
281 | int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) | ||
282 | { | ||
283 | int i; | ||
284 | struct kprobe *p; | ||
285 | |||
286 | for (i = 1; i < op->optinsn.size; i++) { | ||
287 | p = get_kprobe(op->kp.addr + i); | ||
288 | if (p && !kprobe_disabled(p)) | ||
289 | return -EEXIST; | ||
290 | } | ||
291 | |||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | /* Check the addr is within the optimized instructions. */ | ||
296 | int __kprobes | ||
297 | arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) | ||
298 | { | ||
299 | return ((unsigned long)op->kp.addr <= addr && | ||
300 | (unsigned long)op->kp.addr + op->optinsn.size > addr); | ||
301 | } | ||
302 | |||
303 | /* Free optimized instruction slot */ | ||
304 | static __kprobes | ||
305 | void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) | ||
306 | { | ||
307 | if (op->optinsn.insn) { | ||
308 | free_optinsn_slot(op->optinsn.insn, dirty); | ||
309 | op->optinsn.insn = NULL; | ||
310 | op->optinsn.size = 0; | ||
311 | } | ||
312 | } | ||
313 | |||
314 | void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) | ||
315 | { | ||
316 | __arch_remove_optimized_kprobe(op, 1); | ||
317 | } | ||
318 | |||
319 | /* | ||
320 | * Copy the instructions that will be replaced by the jump. | ||
321 | * Target instructions MUST be relocatable (checked inside) | ||
322 | * This is called when a new aggregated (opt)probe is allocated or reused. | ||
323 | */ | ||
324 | int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | ||
325 | { | ||
326 | u8 *buf; | ||
327 | int ret; | ||
328 | long rel; | ||
329 | |||
330 | if (!can_optimize((unsigned long)op->kp.addr)) | ||
331 | return -EILSEQ; | ||
332 | |||
333 | op->optinsn.insn = get_optinsn_slot(); | ||
334 | if (!op->optinsn.insn) | ||
335 | return -ENOMEM; | ||
336 | |||
337 | /* | ||
338 | * Verify if the address gap is in 2GB range, because this uses | ||
339 | * a relative jump. | ||
340 | */ | ||
341 | rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; | ||
342 | if (abs(rel) > 0x7fffffff) | ||
343 | return -ERANGE; | ||
344 | |||
345 | buf = (u8 *)op->optinsn.insn; | ||
346 | |||
347 | /* Copy instructions into the out-of-line buffer */ | ||
348 | ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); | ||
349 | if (ret < 0) { | ||
350 | __arch_remove_optimized_kprobe(op, 0); | ||
351 | return ret; | ||
352 | } | ||
353 | op->optinsn.size = ret; | ||
354 | |||
355 | /* Copy arch-dep-instance from template */ | ||
356 | memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); | ||
357 | |||
358 | /* Set probe information */ | ||
359 | synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); | ||
360 | |||
361 | /* Set probe function call */ | ||
362 | synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); | ||
363 | |||
364 | /* Set returning jmp instruction at the tail of out-of-line buffer */ | ||
365 | synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, | ||
366 | (u8 *)op->kp.addr + op->optinsn.size); | ||
367 | |||
368 | flush_icache_range((unsigned long) buf, | ||
369 | (unsigned long) buf + TMPL_END_IDX + | ||
370 | op->optinsn.size + RELATIVEJUMP_SIZE); | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | #define MAX_OPTIMIZE_PROBES 256 | ||
375 | static struct text_poke_param *jump_poke_params; | ||
376 | static struct jump_poke_buffer { | ||
377 | u8 buf[RELATIVEJUMP_SIZE]; | ||
378 | } *jump_poke_bufs; | ||
379 | |||
380 | static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | ||
381 | u8 *insn_buf, | ||
382 | struct optimized_kprobe *op) | ||
383 | { | ||
384 | s32 rel = (s32)((long)op->optinsn.insn - | ||
385 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | ||
386 | |||
387 | /* Backup instructions which will be replaced by jump address */ | ||
388 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, | ||
389 | RELATIVE_ADDR_SIZE); | ||
390 | |||
391 | insn_buf[0] = RELATIVEJUMP_OPCODE; | ||
392 | *(s32 *)(&insn_buf[1]) = rel; | ||
393 | |||
394 | tprm->addr = op->kp.addr; | ||
395 | tprm->opcode = insn_buf; | ||
396 | tprm->len = RELATIVEJUMP_SIZE; | ||
397 | } | ||
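The poke prepared above is a plain 5-byte "jmp rel32": one opcode byte (RELATIVEJUMP_OPCODE) followed by a 32-bit displacement measured from the end of the jump, which is why rel is computed against op->kp.addr + RELATIVEJUMP_SIZE. A userspace sketch with made-up addresses (assuming the usual 0xe9 near-jump encoding):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define JMP_SIZE 5	/* plays the role of RELATIVEJUMP_SIZE */

/* Build the 5-byte near jump that would be poked over the probe point. */
static void make_reljump(uint8_t *buf, uint64_t from, uint64_t to)
{
	int32_t rel = (int32_t)(to - (from + JMP_SIZE));

	buf[0] = 0xe9;			/* jmp rel32 opcode */
	memcpy(&buf[1], &rel, sizeof(rel));
}

int main(void)
{
	uint8_t buf[JMP_SIZE];
	int i;

	/* Both addresses below are invented for the example. */
	make_reljump(buf, 0xffffffff81000000ULL, 0xffffffffa0000040ULL);
	for (i = 0; i < JMP_SIZE; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}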
398 | |||
399 | /* | ||
400 | * Replace breakpoints (int3) with relative jumps. | ||
401 | * Caller must hold kprobe_mutex and text_mutex. | ||
402 | */ | ||
403 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) | ||
404 | { | ||
405 | struct optimized_kprobe *op, *tmp; | ||
406 | int c = 0; | ||
407 | |||
408 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
409 | WARN_ON(kprobe_disabled(&op->kp)); | ||
410 | /* Setup param */ | ||
411 | setup_optimize_kprobe(&jump_poke_params[c], | ||
412 | jump_poke_bufs[c].buf, op); | ||
413 | list_del_init(&op->list); | ||
414 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
415 | break; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * text_poke_smp doesn't support modifying code that may run in | ||
420 | * NMI/MCE context. However, since kprobes itself doesn't support | ||
421 | * probing such code either, this is not a problem. | ||
422 | */ | ||
423 | text_poke_smp_batch(jump_poke_params, c); | ||
424 | } | ||
425 | |||
426 | static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, | ||
427 | u8 *insn_buf, | ||
428 | struct optimized_kprobe *op) | ||
429 | { | ||
430 | /* Set int3 to first byte for kprobes */ | ||
431 | insn_buf[0] = BREAKPOINT_INSTRUCTION; | ||
432 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
433 | |||
434 | tprm->addr = op->kp.addr; | ||
435 | tprm->opcode = insn_buf; | ||
436 | tprm->len = RELATIVEJUMP_SIZE; | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * Recover original instructions and breakpoints from relative jumps. | ||
441 | * Caller must hold kprobe_mutex. | ||
442 | */ | ||
443 | extern void arch_unoptimize_kprobes(struct list_head *oplist, | ||
444 | struct list_head *done_list) | ||
445 | { | ||
446 | struct optimized_kprobe *op, *tmp; | ||
447 | int c = 0; | ||
448 | |||
449 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
450 | /* Setup param */ | ||
451 | setup_unoptimize_kprobe(&jump_poke_params[c], | ||
452 | jump_poke_bufs[c].buf, op); | ||
453 | list_move(&op->list, done_list); | ||
454 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
455 | break; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * text_poke_smp doesn't support modifying code that may run in | ||
460 | * NMI/MCE context. However, since kprobes itself doesn't support | ||
461 | * probing such code either, this is not a problem. | ||
462 | */ | ||
463 | text_poke_smp_batch(jump_poke_params, c); | ||
464 | } | ||
465 | |||
466 | /* Replace a relative jump with a breakpoint (int3). */ | ||
467 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) | ||
468 | { | ||
469 | u8 buf[RELATIVEJUMP_SIZE]; | ||
470 | |||
471 | /* Set int3 to first byte for kprobes */ | ||
472 | buf[0] = BREAKPOINT_INSTRUCTION; | ||
473 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
474 | text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); | ||
475 | } | ||
476 | |||
477 | int __kprobes | ||
478 | setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) | ||
479 | { | ||
480 | struct optimized_kprobe *op; | ||
481 | |||
482 | if (p->flags & KPROBE_FLAG_OPTIMIZED) { | ||
483 | /* This kprobe is really able to run optimized path. */ | ||
484 | op = container_of(p, struct optimized_kprobe, kp); | ||
485 | /* Detour through copied instructions */ | ||
486 | regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; | ||
487 | if (!reenter) | ||
488 | reset_current_kprobe(); | ||
489 | preempt_enable_no_resched(); | ||
490 | return 1; | ||
491 | } | ||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | int __kprobes arch_init_optprobes(void) | ||
496 | { | ||
497 | /* Allocate code buffer and parameter array */ | ||
498 | jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * | ||
499 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
500 | if (!jump_poke_bufs) | ||
501 | return -ENOMEM; | ||
502 | |||
503 | jump_poke_params = kmalloc(sizeof(struct text_poke_param) * | ||
504 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
505 | if (!jump_poke_params) { | ||
506 | kfree(jump_poke_bufs); | ||
507 | jump_poke_bufs = NULL; | ||
508 | return -ENOMEM; | ||
509 | } | ||
510 | |||
511 | return 0; | ||
512 | } | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7da647d8b64c..e213fc8408d2 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -30,16 +30,15 @@ | |||
30 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi | 30 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi |
31 | * <prasanna@in.ibm.com> added function-return probes. | 31 | * <prasanna@in.ibm.com> added function-return probes. |
32 | * 2005-May Rusty Lynch <rusty.lynch@intel.com> | 32 | * 2005-May Rusty Lynch <rusty.lynch@intel.com> |
33 | * Added function return probes functionality | 33 | * Added function return probes functionality |
34 | * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added | 34 | * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added |
35 | * kprobe-booster and kretprobe-booster for i386. | 35 | * kprobe-booster and kretprobe-booster for i386. |
36 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster | 36 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster |
37 | * and kretprobe-booster for x86-64 | 37 | * and kretprobe-booster for x86-64 |
38 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven | 38 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven |
39 | * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com> | 39 | * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com> |
40 | * unified x86 kprobes code. | 40 | * unified x86 kprobes code. |
41 | */ | 41 | */ |
42 | |||
43 | #include <linux/kprobes.h> | 42 | #include <linux/kprobes.h> |
44 | #include <linux/ptrace.h> | 43 | #include <linux/ptrace.h> |
45 | #include <linux/string.h> | 44 | #include <linux/string.h> |
@@ -59,6 +58,8 @@ | |||
59 | #include <asm/insn.h> | 58 | #include <asm/insn.h> |
60 | #include <asm/debugreg.h> | 59 | #include <asm/debugreg.h> |
61 | 60 | ||
61 | #include "kprobes-common.h" | ||
62 | |||
62 | void jprobe_return_end(void); | 63 | void jprobe_return_end(void); |
63 | 64 | ||
64 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 65 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
@@ -108,6 +109,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { | |||
108 | doesn't switch kernel stack.*/ | 109 | doesn't switch kernel stack.*/ |
109 | {NULL, NULL} /* Terminator */ | 110 | {NULL, NULL} /* Terminator */ |
110 | }; | 111 | }; |
112 | |||
111 | const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); | 113 | const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); |
112 | 114 | ||
113 | static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) | 115 | static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) |
@@ -123,11 +125,17 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) | |||
123 | } | 125 | } |
124 | 126 | ||
125 | /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ | 127 | /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ |
126 | static void __kprobes synthesize_reljump(void *from, void *to) | 128 | void __kprobes synthesize_reljump(void *from, void *to) |
127 | { | 129 | { |
128 | __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); | 130 | __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); |
129 | } | 131 | } |
130 | 132 | ||
133 | /* Insert a call instruction at address 'from', which calls address 'to'.*/ | ||
134 | void __kprobes synthesize_relcall(void *from, void *to) | ||
135 | { | ||
136 | __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); | ||
137 | } | ||
138 | |||
131 | /* | 139 | /* |
132 | * Skip the prefixes of the instruction. | 140 | * Skip the prefixes of the instruction. |
133 | */ | 141 | */ |
@@ -151,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) | |||
151 | * Returns non-zero if opcode is boostable. | 159 | * Returns non-zero if opcode is boostable. |
152 | * RIP relative instructions are adjusted at copying time in 64 bits mode | 160 | * RIP relative instructions are adjusted at copying time in 64 bits mode |
153 | */ | 161 | */ |
154 | static int __kprobes can_boost(kprobe_opcode_t *opcodes) | 162 | int __kprobes can_boost(kprobe_opcode_t *opcodes) |
155 | { | 163 | { |
156 | kprobe_opcode_t opcode; | 164 | kprobe_opcode_t opcode; |
157 | kprobe_opcode_t *orig_opcodes = opcodes; | 165 | kprobe_opcode_t *orig_opcodes = opcodes; |
@@ -207,13 +215,15 @@ retry: | |||
207 | } | 215 | } |
208 | } | 216 | } |
209 | 217 | ||
210 | /* Recover the probed instruction at addr for further analysis. */ | 218 | static unsigned long |
211 | static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | 219 | __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) |
212 | { | 220 | { |
213 | struct kprobe *kp; | 221 | struct kprobe *kp; |
222 | |||
214 | kp = get_kprobe((void *)addr); | 223 | kp = get_kprobe((void *)addr); |
224 | /* There is no probe, return original address */ | ||
215 | if (!kp) | 225 | if (!kp) |
216 | return -EINVAL; | 226 | return addr; |
217 | 227 | ||
218 | /* | 228 | /* |
219 | * Basically, kp->ainsn.insn has an original instruction. | 229 | * Basically, kp->ainsn.insn has an original instruction. |
@@ -230,14 +240,29 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | |||
230 | */ | 240 | */ |
231 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | 241 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
232 | buf[0] = kp->opcode; | 242 | buf[0] = kp->opcode; |
233 | return 0; | 243 | return (unsigned long)buf; |
244 | } | ||
245 | |||
246 | /* | ||
247 | * Recover the probed instruction at addr for further analysis. | ||
248 | * Caller must hold kprobe_mutex, or disable preemption, to prevent | ||
249 | * the kprobes it references from being released. | ||
250 | */ | ||
251 | unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | ||
252 | { | ||
253 | unsigned long __addr; | ||
254 | |||
255 | __addr = __recover_optprobed_insn(buf, addr); | ||
256 | if (__addr != addr) | ||
257 | return __addr; | ||
258 | |||
259 | return __recover_probed_insn(buf, addr); | ||
234 | } | 260 | } |
235 | 261 | ||
236 | /* Check if paddr is at an instruction boundary */ | 262 | /* Check if paddr is at an instruction boundary */ |
237 | static int __kprobes can_probe(unsigned long paddr) | 263 | static int __kprobes can_probe(unsigned long paddr) |
238 | { | 264 | { |
239 | int ret; | 265 | unsigned long addr, __addr, offset = 0; |
240 | unsigned long addr, offset = 0; | ||
241 | struct insn insn; | 266 | struct insn insn; |
242 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 267 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
243 | 268 | ||
@@ -247,26 +272,24 @@ static int __kprobes can_probe(unsigned long paddr) | |||
247 | /* Decode instructions */ | 272 | /* Decode instructions */ |
248 | addr = paddr - offset; | 273 | addr = paddr - offset; |
249 | while (addr < paddr) { | 274 | while (addr < paddr) { |
250 | kernel_insn_init(&insn, (void *)addr); | ||
251 | insn_get_opcode(&insn); | ||
252 | |||
253 | /* | 275 | /* |
254 | * Check if the instruction has been modified by another | 276 | * Check if the instruction has been modified by another |
255 | * kprobe, in which case we replace the breakpoint by the | 277 | * kprobe, in which case we replace the breakpoint by the |
256 | * original instruction in our buffer. | 278 | * original instruction in our buffer. |
279 | * Also, jump optimization will change the breakpoint to a | ||
280 | * relative jump. Since a relative jump is itself a normal | ||
281 | * instruction, we simply pass through if there is no kprobe. | ||
257 | */ | 282 | */ |
258 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | 283 | __addr = recover_probed_instruction(buf, addr); |
259 | ret = recover_probed_instruction(buf, addr); | 284 | kernel_insn_init(&insn, (void *)__addr); |
260 | if (ret) | ||
261 | /* | ||
262 | * Another debugging subsystem might insert | ||
263 | * this breakpoint. In that case, we can't | ||
264 | * recover it. | ||
265 | */ | ||
266 | return 0; | ||
267 | kernel_insn_init(&insn, buf); | ||
268 | } | ||
269 | insn_get_length(&insn); | 285 | insn_get_length(&insn); |
286 | |||
287 | /* | ||
288 | * Another debugging subsystem might insert this breakpoint. | ||
289 | * In that case, we can't recover it. | ||
290 | */ | ||
291 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | ||
292 | return 0; | ||
270 | addr += insn.length; | 293 | addr += insn.length; |
271 | } | 294 | } |
272 | 295 | ||
@@ -299,24 +322,16 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) | |||
299 | * If not, return null. | 322 | * If not, return null. |
300 | * Only applicable to 64-bit x86. | 323 | * Only applicable to 64-bit x86. |
301 | */ | 324 | */ |
302 | static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) | 325 | int __kprobes __copy_instruction(u8 *dest, u8 *src) |
303 | { | 326 | { |
304 | struct insn insn; | 327 | struct insn insn; |
305 | int ret; | ||
306 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 328 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
307 | 329 | ||
308 | kernel_insn_init(&insn, src); | 330 | kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); |
309 | if (recover) { | ||
310 | insn_get_opcode(&insn); | ||
311 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | ||
312 | ret = recover_probed_instruction(buf, | ||
313 | (unsigned long)src); | ||
314 | if (ret) | ||
315 | return 0; | ||
316 | kernel_insn_init(&insn, buf); | ||
317 | } | ||
318 | } | ||
319 | insn_get_length(&insn); | 331 | insn_get_length(&insn); |
332 | /* Another subsystem has put a breakpoint here; we can't recover it */ | ||
333 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | ||
334 | return 0; | ||
320 | memcpy(dest, insn.kaddr, insn.length); | 335 | memcpy(dest, insn.kaddr, insn.length); |
321 | 336 | ||
322 | #ifdef CONFIG_X86_64 | 337 | #ifdef CONFIG_X86_64 |
@@ -337,8 +352,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) | |||
337 | * extension of the original signed 32-bit displacement would | 352 | * extension of the original signed 32-bit displacement would |
338 | * have given. | 353 | * have given. |
339 | */ | 354 | */ |
340 | newdisp = (u8 *) src + (s64) insn.displacement.value - | 355 | newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; |
341 | (u8 *) dest; | ||
342 | BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ | 356 | BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ |
343 | disp = (u8 *) dest + insn_offset_displacement(&insn); | 357 | disp = (u8 *) dest + insn_offset_displacement(&insn); |
344 | *(s32 *) disp = (s32) newdisp; | 358 | *(s32 *) disp = (s32) newdisp; |
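The single-line change above only joins a wrapped expression; the displacement arithmetic itself is unchanged. For reference, a tiny worked check (the addresses and instruction length are invented) of why newdisp = src + disp - dest keeps a RIP-relative operand pointing at the same absolute target after the instruction is copied:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t src  = 0xffffffff81000100ULL;	/* original instruction     */
	uint64_t dest = 0xffffffffa0002000ULL;	/* out-of-line copy         */
	int64_t  disp = 0x200;			/* original rel32 operand   */
	uint64_t len  = 7;			/* hypothetical insn length */

	/* Same formula as __copy_instruction(). */
	int64_t newdisp = (int64_t)(src + (uint64_t)disp - dest);

	/* The absolute target (address of the next insn + displacement)
	 * is preserved by the copy. */
	assert(src + len + (uint64_t)disp == dest + len + (uint64_t)newdisp);
	return 0;
}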
@@ -349,18 +363,20 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) | |||
349 | 363 | ||
350 | static void __kprobes arch_copy_kprobe(struct kprobe *p) | 364 | static void __kprobes arch_copy_kprobe(struct kprobe *p) |
351 | { | 365 | { |
366 | /* Copy the instruction, recovering it if another optprobe has modified it. */ | ||
367 | __copy_instruction(p->ainsn.insn, p->addr); | ||
368 | |||
352 | /* | 369 | /* |
353 | * Copy an instruction without recovering int3, because it will be | 370 | * __copy_instruction can modify the displacement of the instruction, |
354 | * put by another subsystem. | 371 | * but it doesn't affect boostable check. |
355 | */ | 372 | */ |
356 | __copy_instruction(p->ainsn.insn, p->addr, 0); | 373 | if (can_boost(p->ainsn.insn)) |
357 | |||
358 | if (can_boost(p->addr)) | ||
359 | p->ainsn.boostable = 0; | 374 | p->ainsn.boostable = 0; |
360 | else | 375 | else |
361 | p->ainsn.boostable = -1; | 376 | p->ainsn.boostable = -1; |
362 | 377 | ||
363 | p->opcode = *p->addr; | 378 | /* Also, displacement change doesn't affect the first byte */ |
379 | p->opcode = p->ainsn.insn[0]; | ||
364 | } | 380 | } |
365 | 381 | ||
366 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 382 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
@@ -442,8 +458,8 @@ static void __kprobes restore_btf(void) | |||
442 | } | 458 | } |
443 | } | 459 | } |
444 | 460 | ||
445 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | 461 | void __kprobes |
446 | struct pt_regs *regs) | 462 | arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) |
447 | { | 463 | { |
448 | unsigned long *sara = stack_addr(regs); | 464 | unsigned long *sara = stack_addr(regs); |
449 | 465 | ||
@@ -453,16 +469,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | |||
453 | *sara = (unsigned long) &kretprobe_trampoline; | 469 | *sara = (unsigned long) &kretprobe_trampoline; |
454 | } | 470 | } |
455 | 471 | ||
456 | #ifdef CONFIG_OPTPROBES | 472 | static void __kprobes |
457 | static int __kprobes setup_detour_execution(struct kprobe *p, | 473 | setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) |
458 | struct pt_regs *regs, | ||
459 | int reenter); | ||
460 | #else | ||
461 | #define setup_detour_execution(p, regs, reenter) (0) | ||
462 | #endif | ||
463 | |||
464 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
465 | struct kprobe_ctlblk *kcb, int reenter) | ||
466 | { | 474 | { |
467 | if (setup_detour_execution(p, regs, reenter)) | 475 | if (setup_detour_execution(p, regs, reenter)) |
468 | return; | 476 | return; |
@@ -504,8 +512,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, | |||
504 | * within the handler. We save the original kprobes variables and just single | 512 | * within the handler. We save the original kprobes variables and just single |
505 | * step on the instruction of the new probe without calling any user handlers. | 513 | * step on the instruction of the new probe without calling any user handlers. |
506 | */ | 514 | */ |
507 | static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, | 515 | static int __kprobes |
508 | struct kprobe_ctlblk *kcb) | 516 | reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) |
509 | { | 517 | { |
510 | switch (kcb->kprobe_status) { | 518 | switch (kcb->kprobe_status) { |
511 | case KPROBE_HIT_SSDONE: | 519 | case KPROBE_HIT_SSDONE: |
@@ -600,69 +608,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
600 | return 0; | 608 | return 0; |
601 | } | 609 | } |
602 | 610 | ||
603 | #ifdef CONFIG_X86_64 | ||
604 | #define SAVE_REGS_STRING \ | ||
605 | /* Skip cs, ip, orig_ax. */ \ | ||
606 | " subq $24, %rsp\n" \ | ||
607 | " pushq %rdi\n" \ | ||
608 | " pushq %rsi\n" \ | ||
609 | " pushq %rdx\n" \ | ||
610 | " pushq %rcx\n" \ | ||
611 | " pushq %rax\n" \ | ||
612 | " pushq %r8\n" \ | ||
613 | " pushq %r9\n" \ | ||
614 | " pushq %r10\n" \ | ||
615 | " pushq %r11\n" \ | ||
616 | " pushq %rbx\n" \ | ||
617 | " pushq %rbp\n" \ | ||
618 | " pushq %r12\n" \ | ||
619 | " pushq %r13\n" \ | ||
620 | " pushq %r14\n" \ | ||
621 | " pushq %r15\n" | ||
622 | #define RESTORE_REGS_STRING \ | ||
623 | " popq %r15\n" \ | ||
624 | " popq %r14\n" \ | ||
625 | " popq %r13\n" \ | ||
626 | " popq %r12\n" \ | ||
627 | " popq %rbp\n" \ | ||
628 | " popq %rbx\n" \ | ||
629 | " popq %r11\n" \ | ||
630 | " popq %r10\n" \ | ||
631 | " popq %r9\n" \ | ||
632 | " popq %r8\n" \ | ||
633 | " popq %rax\n" \ | ||
634 | " popq %rcx\n" \ | ||
635 | " popq %rdx\n" \ | ||
636 | " popq %rsi\n" \ | ||
637 | " popq %rdi\n" \ | ||
638 | /* Skip orig_ax, ip, cs */ \ | ||
639 | " addq $24, %rsp\n" | ||
640 | #else | ||
641 | #define SAVE_REGS_STRING \ | ||
642 | /* Skip cs, ip, orig_ax and gs. */ \ | ||
643 | " subl $16, %esp\n" \ | ||
644 | " pushl %fs\n" \ | ||
645 | " pushl %es\n" \ | ||
646 | " pushl %ds\n" \ | ||
647 | " pushl %eax\n" \ | ||
648 | " pushl %ebp\n" \ | ||
649 | " pushl %edi\n" \ | ||
650 | " pushl %esi\n" \ | ||
651 | " pushl %edx\n" \ | ||
652 | " pushl %ecx\n" \ | ||
653 | " pushl %ebx\n" | ||
654 | #define RESTORE_REGS_STRING \ | ||
655 | " popl %ebx\n" \ | ||
656 | " popl %ecx\n" \ | ||
657 | " popl %edx\n" \ | ||
658 | " popl %esi\n" \ | ||
659 | " popl %edi\n" \ | ||
660 | " popl %ebp\n" \ | ||
661 | " popl %eax\n" \ | ||
662 | /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ | ||
663 | " addl $24, %esp\n" | ||
664 | #endif | ||
665 | |||
666 | /* | 611 | /* |
667 | * When a retprobed function returns, this code saves registers and | 612 | * When a retprobed function returns, this code saves registers and |
668 | * calls trampoline_handler() runs, which calls the kretprobe's handler. | 613 | * calls trampoline_handler() runs, which calls the kretprobe's handler. |
@@ -816,8 +761,8 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
816 | * jump instruction after the copied instruction, that jumps to the next | 761 | * jump instruction after the copied instruction, that jumps to the next |
817 | * instruction after the probepoint. | 762 | * instruction after the probepoint. |
818 | */ | 763 | */ |
819 | static void __kprobes resume_execution(struct kprobe *p, | 764 | static void __kprobes |
820 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | 765 | resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) |
821 | { | 766 | { |
822 | unsigned long *tos = stack_addr(regs); | 767 | unsigned long *tos = stack_addr(regs); |
823 | unsigned long copy_ip = (unsigned long)p->ainsn.insn; | 768 | unsigned long copy_ip = (unsigned long)p->ainsn.insn; |
@@ -996,8 +941,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
996 | /* | 941 | /* |
997 | * Wrapper routine for handling exceptions. | 942 | * Wrapper routine for handling exceptions. |
998 | */ | 943 | */ |
999 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | 944 | int __kprobes |
1000 | unsigned long val, void *data) | 945 | kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) |
1001 | { | 946 | { |
1002 | struct die_args *args = data; | 947 | struct die_args *args = data; |
1003 | int ret = NOTIFY_DONE; | 948 | int ret = NOTIFY_DONE; |
@@ -1107,466 +1052,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
1107 | return 0; | 1052 | return 0; |
1108 | } | 1053 | } |
1109 | 1054 | ||
1110 | |||
1111 | #ifdef CONFIG_OPTPROBES | ||
1112 | |||
1113 | /* Insert a call instruction at address 'from', which calls address 'to'.*/ | ||
1114 | static void __kprobes synthesize_relcall(void *from, void *to) | ||
1115 | { | ||
1116 | __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); | ||
1117 | } | ||
1118 | |||
1119 | /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ | ||
1120 | static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, | ||
1121 | unsigned long val) | ||
1122 | { | ||
1123 | #ifdef CONFIG_X86_64 | ||
1124 | *addr++ = 0x48; | ||
1125 | *addr++ = 0xbf; | ||
1126 | #else | ||
1127 | *addr++ = 0xb8; | ||
1128 | #endif | ||
1129 | *(unsigned long *)addr = val; | ||
1130 | } | ||
1131 | |||
1132 | static void __used __kprobes kprobes_optinsn_template_holder(void) | ||
1133 | { | ||
1134 | asm volatile ( | ||
1135 | ".global optprobe_template_entry\n" | ||
1136 | "optprobe_template_entry: \n" | ||
1137 | #ifdef CONFIG_X86_64 | ||
1138 | /* We don't bother saving the ss register */ | ||
1139 | " pushq %rsp\n" | ||
1140 | " pushfq\n" | ||
1141 | SAVE_REGS_STRING | ||
1142 | " movq %rsp, %rsi\n" | ||
1143 | ".global optprobe_template_val\n" | ||
1144 | "optprobe_template_val: \n" | ||
1145 | ASM_NOP5 | ||
1146 | ASM_NOP5 | ||
1147 | ".global optprobe_template_call\n" | ||
1148 | "optprobe_template_call: \n" | ||
1149 | ASM_NOP5 | ||
1150 | /* Move flags to rsp */ | ||
1151 | " movq 144(%rsp), %rdx\n" | ||
1152 | " movq %rdx, 152(%rsp)\n" | ||
1153 | RESTORE_REGS_STRING | ||
1154 | /* Skip flags entry */ | ||
1155 | " addq $8, %rsp\n" | ||
1156 | " popfq\n" | ||
1157 | #else /* CONFIG_X86_32 */ | ||
1158 | " pushf\n" | ||
1159 | SAVE_REGS_STRING | ||
1160 | " movl %esp, %edx\n" | ||
1161 | ".global optprobe_template_val\n" | ||
1162 | "optprobe_template_val: \n" | ||
1163 | ASM_NOP5 | ||
1164 | ".global optprobe_template_call\n" | ||
1165 | "optprobe_template_call: \n" | ||
1166 | ASM_NOP5 | ||
1167 | RESTORE_REGS_STRING | ||
1168 | " addl $4, %esp\n" /* skip cs */ | ||
1169 | " popf\n" | ||
1170 | #endif | ||
1171 | ".global optprobe_template_end\n" | ||
1172 | "optprobe_template_end: \n"); | ||
1173 | } | ||
1174 | |||
1175 | #define TMPL_MOVE_IDX \ | ||
1176 | ((long)&optprobe_template_val - (long)&optprobe_template_entry) | ||
1177 | #define TMPL_CALL_IDX \ | ||
1178 | ((long)&optprobe_template_call - (long)&optprobe_template_entry) | ||
1179 | #define TMPL_END_IDX \ | ||
1180 | ((long)&optprobe_template_end - (long)&optprobe_template_entry) | ||
1181 | |||
1182 | #define INT3_SIZE sizeof(kprobe_opcode_t) | ||
1183 | |||
1184 | /* Optimized kprobe call back function: called from optinsn */ | ||
1185 | static void __kprobes optimized_callback(struct optimized_kprobe *op, | ||
1186 | struct pt_regs *regs) | ||
1187 | { | ||
1188 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
1189 | unsigned long flags; | ||
1190 | |||
1191 | /* This is possible if op is under delayed unoptimizing */ | ||
1192 | if (kprobe_disabled(&op->kp)) | ||
1193 | return; | ||
1194 | |||
1195 | local_irq_save(flags); | ||
1196 | if (kprobe_running()) { | ||
1197 | kprobes_inc_nmissed_count(&op->kp); | ||
1198 | } else { | ||
1199 | /* Save skipped registers */ | ||
1200 | #ifdef CONFIG_X86_64 | ||
1201 | regs->cs = __KERNEL_CS; | ||
1202 | #else | ||
1203 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | ||
1204 | regs->gs = 0; | ||
1205 | #endif | ||
1206 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | ||
1207 | regs->orig_ax = ~0UL; | ||
1208 | |||
1209 | __this_cpu_write(current_kprobe, &op->kp); | ||
1210 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
1211 | opt_pre_handler(&op->kp, regs); | ||
1212 | __this_cpu_write(current_kprobe, NULL); | ||
1213 | } | ||
1214 | local_irq_restore(flags); | ||
1215 | } | ||
1216 | |||
1217 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | ||
1218 | { | ||
1219 | int len = 0, ret; | ||
1220 | |||
1221 | while (len < RELATIVEJUMP_SIZE) { | ||
1222 | ret = __copy_instruction(dest + len, src + len, 1); | ||
1223 | if (!ret || !can_boost(dest + len)) | ||
1224 | return -EINVAL; | ||
1225 | len += ret; | ||
1226 | } | ||
1227 | /* Check whether the address range is reserved */ | ||
1228 | if (ftrace_text_reserved(src, src + len - 1) || | ||
1229 | alternatives_text_reserved(src, src + len - 1) || | ||
1230 | jump_label_text_reserved(src, src + len - 1)) | ||
1231 | return -EBUSY; | ||
1232 | |||
1233 | return len; | ||
1234 | } | ||
1235 | |||
1236 | /* Check whether insn is indirect jump */ | ||
1237 | static int __kprobes insn_is_indirect_jump(struct insn *insn) | ||
1238 | { | ||
1239 | return ((insn->opcode.bytes[0] == 0xff && | ||
1240 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ | ||
1241 | insn->opcode.bytes[0] == 0xea); /* Segment based jump */ | ||
1242 | } | ||
1243 | |||
1244 | /* Check whether insn jumps into specified address range */ | ||
1245 | static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) | ||
1246 | { | ||
1247 | unsigned long target = 0; | ||
1248 | |||
1249 | switch (insn->opcode.bytes[0]) { | ||
1250 | case 0xe0: /* loopne */ | ||
1251 | case 0xe1: /* loope */ | ||
1252 | case 0xe2: /* loop */ | ||
1253 | case 0xe3: /* jcxz */ | ||
1254 | case 0xe9: /* near relative jump */ | ||
1255 | case 0xeb: /* short relative jump */ | ||
1256 | break; | ||
1257 | case 0x0f: | ||
1258 | if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ | ||
1259 | break; | ||
1260 | return 0; | ||
1261 | default: | ||
1262 | if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ | ||
1263 | break; | ||
1264 | return 0; | ||
1265 | } | ||
1266 | target = (unsigned long)insn->next_byte + insn->immediate.value; | ||
1267 | |||
1268 | return (start <= target && target <= start + len); | ||
1269 | } | ||
1270 | |||
1271 | /* Decode whole function to ensure any instructions don't jump into target */ | ||
1272 | static int __kprobes can_optimize(unsigned long paddr) | ||
1273 | { | ||
1274 | int ret; | ||
1275 | unsigned long addr, size = 0, offset = 0; | ||
1276 | struct insn insn; | ||
1277 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | ||
1278 | |||
1279 | /* Lookup symbol including addr */ | ||
1280 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) | ||
1281 | return 0; | ||
1282 | |||
1283 | /* | ||
1284 | * Do not optimize in the entry code due to the unstable | ||
1285 | * stack handling. | ||
1286 | */ | ||
1287 | if ((paddr >= (unsigned long )__entry_text_start) && | ||
1288 | (paddr < (unsigned long )__entry_text_end)) | ||
1289 | return 0; | ||
1290 | |||
1291 | /* Check there is enough space for a relative jump. */ | ||
1292 | if (size - offset < RELATIVEJUMP_SIZE) | ||
1293 | return 0; | ||
1294 | |||
1295 | /* Decode instructions */ | ||
1296 | addr = paddr - offset; | ||
1297 | while (addr < paddr - offset + size) { /* Decode until function end */ | ||
1298 | if (search_exception_tables(addr)) | ||
1299 | /* | ||
1300 | * Since some fixup code will jumps into this function, | ||
1301 | * we can't optimize kprobe in this function. | ||
1302 | */ | ||
1303 | return 0; | ||
1304 | kernel_insn_init(&insn, (void *)addr); | ||
1305 | insn_get_opcode(&insn); | ||
1306 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | ||
1307 | ret = recover_probed_instruction(buf, addr); | ||
1308 | if (ret) | ||
1309 | return 0; | ||
1310 | kernel_insn_init(&insn, buf); | ||
1311 | } | ||
1312 | insn_get_length(&insn); | ||
1313 | /* Recover address */ | ||
1314 | insn.kaddr = (void *)addr; | ||
1315 | insn.next_byte = (void *)(addr + insn.length); | ||
1316 | /* Check any instructions don't jump into target */ | ||
1317 | if (insn_is_indirect_jump(&insn) || | ||
1318 | insn_jump_into_range(&insn, paddr + INT3_SIZE, | ||
1319 | RELATIVE_ADDR_SIZE)) | ||
1320 | return 0; | ||
1321 | addr += insn.length; | ||
1322 | } | ||
1323 | |||
1324 | return 1; | ||
1325 | } | ||
1326 | |||
1327 | /* Check optimized_kprobe can actually be optimized. */ | ||
1328 | int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) | ||
1329 | { | ||
1330 | int i; | ||
1331 | struct kprobe *p; | ||
1332 | |||
1333 | for (i = 1; i < op->optinsn.size; i++) { | ||
1334 | p = get_kprobe(op->kp.addr + i); | ||
1335 | if (p && !kprobe_disabled(p)) | ||
1336 | return -EEXIST; | ||
1337 | } | ||
1338 | |||
1339 | return 0; | ||
1340 | } | ||
1341 | |||
1342 | /* Check the addr is within the optimized instructions. */ | ||
1343 | int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op, | ||
1344 | unsigned long addr) | ||
1345 | { | ||
1346 | return ((unsigned long)op->kp.addr <= addr && | ||
1347 | (unsigned long)op->kp.addr + op->optinsn.size > addr); | ||
1348 | } | ||
1349 | |||
1350 | /* Free optimized instruction slot */ | ||
1351 | static __kprobes | ||
1352 | void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) | ||
1353 | { | ||
1354 | if (op->optinsn.insn) { | ||
1355 | free_optinsn_slot(op->optinsn.insn, dirty); | ||
1356 | op->optinsn.insn = NULL; | ||
1357 | op->optinsn.size = 0; | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) | ||
1362 | { | ||
1363 | __arch_remove_optimized_kprobe(op, 1); | ||
1364 | } | ||
1365 | |||
1366 | /* | ||
1367 | * Copy replacing target instructions | ||
1368 | * Target instructions MUST be relocatable (checked inside) | ||
1369 | */ | ||
1370 | int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | ||
1371 | { | ||
1372 | u8 *buf; | ||
1373 | int ret; | ||
1374 | long rel; | ||
1375 | |||
1376 | if (!can_optimize((unsigned long)op->kp.addr)) | ||
1377 | return -EILSEQ; | ||
1378 | |||
1379 | op->optinsn.insn = get_optinsn_slot(); | ||
1380 | if (!op->optinsn.insn) | ||
1381 | return -ENOMEM; | ||
1382 | |||
1383 | /* | ||
1384 | * Verify if the address gap is in 2GB range, because this uses | ||
1385 | * a relative jump. | ||
1386 | */ | ||
1387 | rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; | ||
1388 | if (abs(rel) > 0x7fffffff) | ||
1389 | return -ERANGE; | ||
1390 | |||
1391 | buf = (u8 *)op->optinsn.insn; | ||
1392 | |||
1393 | /* Copy instructions into the out-of-line buffer */ | ||
1394 | ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); | ||
1395 | if (ret < 0) { | ||
1396 | __arch_remove_optimized_kprobe(op, 0); | ||
1397 | return ret; | ||
1398 | } | ||
1399 | op->optinsn.size = ret; | ||
1400 | |||
1401 | /* Copy arch-dep-instance from template */ | ||
1402 | memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); | ||
1403 | |||
1404 | /* Set probe information */ | ||
1405 | synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); | ||
1406 | |||
1407 | /* Set probe function call */ | ||
1408 | synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); | ||
1409 | |||
1410 | /* Set returning jmp instruction at the tail of out-of-line buffer */ | ||
1411 | synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, | ||
1412 | (u8 *)op->kp.addr + op->optinsn.size); | ||
1413 | |||
1414 | flush_icache_range((unsigned long) buf, | ||
1415 | (unsigned long) buf + TMPL_END_IDX + | ||
1416 | op->optinsn.size + RELATIVEJUMP_SIZE); | ||
1417 | return 0; | ||
1418 | } | ||
1419 | |||
1420 | #define MAX_OPTIMIZE_PROBES 256 | ||
1421 | static struct text_poke_param *jump_poke_params; | ||
1422 | static struct jump_poke_buffer { | ||
1423 | u8 buf[RELATIVEJUMP_SIZE]; | ||
1424 | } *jump_poke_bufs; | ||
1425 | |||
1426 | static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | ||
1427 | u8 *insn_buf, | ||
1428 | struct optimized_kprobe *op) | ||
1429 | { | ||
1430 | s32 rel = (s32)((long)op->optinsn.insn - | ||
1431 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | ||
1432 | |||
1433 | /* Backup instructions which will be replaced by jump address */ | ||
1434 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, | ||
1435 | RELATIVE_ADDR_SIZE); | ||
1436 | |||
1437 | insn_buf[0] = RELATIVEJUMP_OPCODE; | ||
1438 | *(s32 *)(&insn_buf[1]) = rel; | ||
1439 | |||
1440 | tprm->addr = op->kp.addr; | ||
1441 | tprm->opcode = insn_buf; | ||
1442 | tprm->len = RELATIVEJUMP_SIZE; | ||
1443 | } | ||
1444 | |||
1445 | /* | ||
1446 | * Replace breakpoints (int3) with relative jumps. | ||
1447 | * Caller must call with locking kprobe_mutex and text_mutex. | ||
1448 | */ | ||
1449 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) | ||
1450 | { | ||
1451 | struct optimized_kprobe *op, *tmp; | ||
1452 | int c = 0; | ||
1453 | |||
1454 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
1455 | WARN_ON(kprobe_disabled(&op->kp)); | ||
1456 | /* Setup param */ | ||
1457 | setup_optimize_kprobe(&jump_poke_params[c], | ||
1458 | jump_poke_bufs[c].buf, op); | ||
1459 | list_del_init(&op->list); | ||
1460 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
1461 | break; | ||
1462 | } | ||
1463 | |||
1464 | /* | ||
1465 | * text_poke_smp doesn't support NMI/MCE code modifying. | ||
1466 | * However, since kprobes itself also doesn't support NMI/MCE | ||
1467 | * code probing, it's not a problem. | ||
1468 | */ | ||
1469 | text_poke_smp_batch(jump_poke_params, c); | ||
1470 | } | ||
1471 | |||
1472 | static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, | ||
1473 | u8 *insn_buf, | ||
1474 | struct optimized_kprobe *op) | ||
1475 | { | ||
1476 | /* Set int3 as the first byte for kprobes */ | ||
1477 | insn_buf[0] = BREAKPOINT_INSTRUCTION; | ||
1478 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
1479 | |||
1480 | tprm->addr = op->kp.addr; | ||
1481 | tprm->opcode = insn_buf; | ||
1482 | tprm->len = RELATIVEJUMP_SIZE; | ||
1483 | } | ||
1484 | |||
1485 | /* | ||
1486 | * Recover original instructions and breakpoints from relative jumps. | ||
1487 | * The caller must hold kprobe_mutex. | ||
1488 | */ | ||
1489 | extern void arch_unoptimize_kprobes(struct list_head *oplist, | ||
1490 | struct list_head *done_list) | ||
1491 | { | ||
1492 | struct optimized_kprobe *op, *tmp; | ||
1493 | int c = 0; | ||
1494 | |||
1495 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
1496 | /* Setup param */ | ||
1497 | setup_unoptimize_kprobe(&jump_poke_params[c], | ||
1498 | jump_poke_bufs[c].buf, op); | ||
1499 | list_move(&op->list, done_list); | ||
1500 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
1501 | break; | ||
1502 | } | ||
1503 | |||
1504 | /* | ||
1505 | * text_poke_smp doesn't support modifying NMI/MCE handler code. | ||
1506 | * However, since kprobes itself also doesn't support probing | ||
1507 | * NMI/MCE code, this is not a problem. | ||
1508 | */ | ||
1509 | text_poke_smp_batch(jump_poke_params, c); | ||
1510 | } | ||
1511 | |||
1512 | /* Replace a relative jump with a breakpoint (int3). */ | ||
1513 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) | ||
1514 | { | ||
1515 | u8 buf[RELATIVEJUMP_SIZE]; | ||
1516 | |||
1517 | /* Set int3 as the first byte for kprobes */ | ||
1518 | buf[0] = BREAKPOINT_INSTRUCTION; | ||
1519 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
1520 | text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); | ||
1521 | } | ||
1522 | |||
1523 | static int __kprobes setup_detour_execution(struct kprobe *p, | ||
1524 | struct pt_regs *regs, | ||
1525 | int reenter) | ||
1526 | { | ||
1527 | struct optimized_kprobe *op; | ||
1528 | |||
1529 | if (p->flags & KPROBE_FLAG_OPTIMIZED) { | ||
1530 | /* This kprobe can really run on the optimized path. */ | ||
1531 | op = container_of(p, struct optimized_kprobe, kp); | ||
1532 | /* Detour through copied instructions */ | ||
1533 | regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; | ||
1534 | if (!reenter) | ||
1535 | reset_current_kprobe(); | ||
1536 | preempt_enable_no_resched(); | ||
1537 | return 1; | ||
1538 | } | ||
1539 | return 0; | ||
1540 | } | ||
1541 | |||
1542 | static int __kprobes init_poke_params(void) | ||
1543 | { | ||
1544 | /* Allocate code buffer and parameter array */ | ||
1545 | jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * | ||
1546 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
1547 | if (!jump_poke_bufs) | ||
1548 | return -ENOMEM; | ||
1549 | |||
1550 | jump_poke_params = kmalloc(sizeof(struct text_poke_param) * | ||
1551 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
1552 | if (!jump_poke_params) { | ||
1553 | kfree(jump_poke_bufs); | ||
1554 | jump_poke_bufs = NULL; | ||
1555 | return -ENOMEM; | ||
1556 | } | ||
1557 | |||
1558 | return 0; | ||
1559 | } | ||
1560 | #else /* !CONFIG_OPTPROBES */ | ||
1561 | static int __kprobes init_poke_params(void) | ||
1562 | { | ||
1563 | return 0; | ||
1564 | } | ||
1565 | #endif | ||
1566 | |||
1567 | int __init arch_init_kprobes(void) | 1055 | int __init arch_init_kprobes(void) |
1568 | { | 1056 | { |
1569 | return init_poke_params(); | 1057 | return arch_init_optprobes(); |
1570 | } | 1058 | } |
1571 | 1059 | ||
1572 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | 1060 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) |
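The removed kprobes code above emits a 5-byte relative jump whose 32-bit displacement is measured from the end of the instruction (see setup_optimize_kprobe() and synthesize_reljump()). Below is a minimal stand-alone sketch of that encoding; JMP_OPCODE, JMP_SIZE and synthesize_jmp() are illustrative stand-ins for the kernel's RELATIVEJUMP_* constants and helpers, not the kernel API.

/*
 * Stand-alone sketch (not kernel code): encoding an x86 "jmp rel32"
 * whose displacement is relative to the end of the jump instruction.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JMP_OPCODE 0xe9	/* x86 near jump */
#define JMP_SIZE   5	/* opcode byte + 32-bit displacement */

static void synthesize_jmp(uint8_t buf[JMP_SIZE], uint64_t src, uint64_t dst)
{
	/* The displacement is measured from the end of the jump. */
	int32_t rel = (int32_t)(dst - (src + JMP_SIZE));

	buf[0] = JMP_OPCODE;
	memcpy(&buf[1], &rel, sizeof(rel));
}

int main(void)
{
	uint8_t buf[JMP_SIZE];
	int32_t rel;

	synthesize_jmp(buf, 0x1000, 0x2000);
	memcpy(&rel, &buf[1], sizeof(rel));
	printf("rel32 = %d\n", rel);	/* 0x2000 - 0x1005 = 4091 */
	return 0;
}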
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index fc45ba887d05..e395693abdb1 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c | |||
@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops) | |||
48 | } | 48 | } |
49 | 49 | ||
50 | /* TSC based delay: */ | 50 | /* TSC based delay: */ |
51 | static void delay_tsc(unsigned long loops) | 51 | static void delay_tsc(unsigned long __loops) |
52 | { | 52 | { |
53 | unsigned long bclock, now; | 53 | u32 bclock, now, loops = __loops; |
54 | int cpu; | 54 | int cpu; |
55 | 55 | ||
56 | preempt_disable(); | 56 | preempt_disable(); |
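The delay.c hunk above narrows bclock and now to u32, presumably so that the elapsed-cycle subtraction is done in 32-bit unsigned arithmetic, which stays well defined modulo 2^32 across a counter wrap. A small stand-alone illustration of that property (the values are invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * With unsigned 32-bit arithmetic, "now - bclock" is computed
	 * modulo 2^32, so the elapsed count stays correct even when the
	 * counter wraps between the two reads.
	 */
	uint32_t bclock = 0xfffffff0u;	/* read just before the wrap */
	uint32_t now    = 0x00000010u;	/* read just after the wrap  */

	printf("elapsed = %u cycles\n", now - bclock);	/* prints 32 */
	return 0;
}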
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index f581a18c0d4d..8ecbb4bba4b3 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c | |||
@@ -333,13 +333,15 @@ try_again: | |||
333 | * Lookup failure means no vma is above this address, | 333 | * Lookup failure means no vma is above this address, |
334 | * i.e. return with success: | 334 | * i.e. return with success: |
335 | */ | 335 | */ |
336 | if (!(vma = find_vma_prev(mm, addr, &prev_vma))) | 336 | vma = find_vma(mm, addr); |
337 | if (!vma) | ||
337 | return addr; | 338 | return addr; |
338 | 339 | ||
339 | /* | 340 | /* |
340 | * new region fits between prev_vma->vm_end and | 341 | * new region fits between prev_vma->vm_end and |
341 | * vma->vm_start, use it: | 342 | * vma->vm_start, use it: |
342 | */ | 343 | */ |
344 | prev_vma = vma->vm_prev; | ||
343 | if (addr + len <= vma->vm_start && | 345 | if (addr + len <= vma->vm_start && |
344 | (!prev_vma || (addr >= prev_vma->vm_end))) { | 346 | (!prev_vma || (addr >= prev_vma->vm_end))) { |
345 | /* remember the address as a hint for next time */ | 347 | /* remember the address as a hint for next time */ |
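The hugetlbpage.c hunk replaces find_vma_prev() with find_vma() followed by a vma->vm_prev lookup before performing the same gap check. A minimal sketch of that check follows; struct vma and fits_below() are hypothetical stand-ins, not the kernel's vm_area_struct or any kernel helper.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's vm_area_struct. */
struct vma {
	unsigned long vm_start, vm_end;
	struct vma *vm_prev;
};

/*
 * A new [addr, addr + len) region fits into the gap below 'vma' if it ends
 * at or before vma->vm_start and starts at or after the end of the previous
 * mapping (or there is no previous mapping at all).
 */
static bool fits_below(const struct vma *vma, unsigned long addr,
		       unsigned long len)
{
	const struct vma *prev = vma->vm_prev;

	return addr + len <= vma->vm_start &&
	       (!prev || addr >= prev->vm_end);
}

int main(void)
{
	struct vma prev = { .vm_start = 0x1000, .vm_end = 0x2000, .vm_prev = NULL };
	struct vma next = { .vm_start = 0x6000, .vm_end = 0x8000, .vm_prev = &prev };

	printf("%d\n", fits_below(&next, 0x3000, 0x2000));	/* 1: fits in the gap */
	printf("%d\n", fits_below(&next, 0x1800, 0x2000));	/* 0: overlaps prev   */
	return 0;
}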
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index a312e76063a7..49a5cb55429b 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -60,6 +60,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = { | |||
60 | DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), | 60 | DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), |
61 | }, | 61 | }, |
62 | }, | 62 | }, |
63 | /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */ | ||
64 | { | ||
65 | .callback = set_use_crs, | ||
66 | .ident = "MSI MS-7253", | ||
67 | .matches = { | ||
68 | DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
69 | DMI_MATCH(DMI_BOARD_NAME, "MS-7253"), | ||
70 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), | ||
71 | }, | ||
72 | }, | ||
63 | 73 | ||
64 | /* Now for the blacklist.. */ | 74 | /* Now for the blacklist.. */ |
65 | 75 | ||
@@ -282,9 +292,6 @@ static void add_resources(struct pci_root_info *info) | |||
282 | int i; | 292 | int i; |
283 | struct resource *res, *root, *conflict; | 293 | struct resource *res, *root, *conflict; |
284 | 294 | ||
285 | if (!pci_use_crs) | ||
286 | return; | ||
287 | |||
288 | coalesce_windows(info, IORESOURCE_MEM); | 295 | coalesce_windows(info, IORESOURCE_MEM); |
289 | coalesce_windows(info, IORESOURCE_IO); | 296 | coalesce_windows(info, IORESOURCE_IO); |
290 | 297 | ||
@@ -336,8 +343,13 @@ get_current_resources(struct acpi_device *device, int busnum, | |||
336 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, | 343 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, |
337 | &info); | 344 | &info); |
338 | 345 | ||
339 | add_resources(&info); | 346 | if (pci_use_crs) { |
340 | return; | 347 | add_resources(&info); |
348 | |||
349 | return; | ||
350 | } | ||
351 | |||
352 | kfree(info.name); | ||
341 | 353 | ||
342 | name_alloc_fail: | 354 | name_alloc_fail: |
343 | kfree(info.res); | 355 | kfree(info.res); |