Diffstat (limited to 'drivers/bus')

-rw-r--r--  drivers/bus/Kconfig        |   2
-rw-r--r--  drivers/bus/arm-cci.c      | 555
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 168
-rw-r--r--  drivers/bus/imx-weim.c     |   1
-rw-r--r--  drivers/bus/mvebu-mbus.c   | 180
-rw-r--r--  drivers/bus/omap-ocp2scp.c |   1
-rw-r--r--  drivers/bus/omap_l3_noc.c  |  64
-rw-r--r--  drivers/bus/omap_l3_smx.c  |  26

8 files changed, 847 insertions, 150 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 603eb1be4f6a..b99729e36860 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -6,7 +6,7 @@ menu "Bus devices"
 
 config BRCMSTB_GISB_ARB
 	bool "Broadcom STB GISB bus arbiter"
-	depends on ARM
+	depends on ARM || MIPS
 	help
 	  Driver for the Broadcom Set Top Box System-on-a-chip internal bus
 	  arbiter. This driver provides timeout and target abort error handling
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 7af78df241f2..0ce5e2d65a06 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -16,17 +16,17 @@
 
 #include <linux/arm-cci.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/perf_event.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
-#include <asm/pmu.h>
 #include <asm/smp_plat.h>
 
 #define DRIVER_NAME		"CCI-400"
@@ -98,6 +98,8 @@ static unsigned long cci_ctrl_phys;
 
 #define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)
 
+#define CCI_PMU_CNTR_MASK	((1ULL << 32) - 1)
+
 /*
  * Instead of an event id to monitor CCI cycles, a dedicated counter is
  * provided. Use 0xff to represent CCI cycles and hope that no future revisions
@@ -170,18 +172,29 @@ static char *const pmu_names[] = {
 	[CCI_REV_R1] = "CCI_400_r1",
 };
 
-struct cci_pmu_drv_data {
+struct cci_pmu_hw_events {
+	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+	raw_spinlock_t pmu_lock;
+};
+
+struct cci_pmu {
 	void __iomem *base;
-	struct arm_pmu *cci_pmu;
+	struct pmu pmu;
 	int nr_irqs;
 	int irqs[CCI_PMU_MAX_HW_EVENTS];
 	unsigned long active_irqs;
-	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
-	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
 	struct pmu_port_event_ranges *port_ranges;
-	struct pmu_hw_events hw_events;
+	struct cci_pmu_hw_events hw_events;
+	struct platform_device *plat_device;
+	int num_events;
+	atomic_t active_events;
+	struct mutex reserve_mutex;
+	cpumask_t cpus;
 };
-static struct cci_pmu_drv_data *pmu;
+static struct cci_pmu *pmu;
+
+#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
 static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
 {
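
Note: the refactoring above drops the arm_pmu base class and instead embeds a bare struct pmu inside struct cci_pmu; the new to_cci_pmu() macro recovers the enclosing driver state from the struct pmu pointer that perf core hands back, via container_of(). A minimal stand-alone sketch of that pattern (the struct names here are illustrative, not the driver's):

    #include <stddef.h>

    /* container_of(): recover the enclosing struct from a member pointer. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pmu { int type; };

    struct outer {                  /* hypothetical stand-in for struct cci_pmu */
            void *base;
            struct pmu pmu;         /* embedded member, as in the patch */
    };

    static struct outer *to_outer(struct pmu *p)
    {
            /* perf core hands back &outer->pmu; step back to the container */
            return container_of(p, struct outer, pmu);
    }
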
@@ -252,7 +265,7 @@ static int pmu_validate_hw_event(u8 hw_event)
 	return -ENOENT;
 }
 
-static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
 {
 	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
 		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
@@ -293,14 +306,9 @@ static u32 pmu_get_max_counters(void)
 	return n_cnts + 1;
 }
 
-static struct pmu_hw_events *pmu_get_hw_events(void)
-{
-	return &pmu->hw_events;
-}
-
-static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
-{
-	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
 	struct hw_perf_event *hw_event = &event->hw;
 	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
 	int idx;
@@ -336,7 +344,7 @@ static int pmu_map_event(struct perf_event *event)
 	return mapping;
 }
 
-static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
 {
 	int i;
 	struct platform_device *pmu_device = cci_pmu->plat_device;
@@ -371,17 +379,91 @@ static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
 	return 0;
 }
 
+static void pmu_free_irq(struct cci_pmu *cci_pmu)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_irqs; i++) {
+		if (!test_and_clear_bit(i, &pmu->active_irqs))
+			continue;
+
+		free_irq(pmu->irqs[i], cci_pmu);
+	}
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+	u32 value;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return 0;
+	}
+	value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+	return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+	else
+		pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static u64 pmu_event_update(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = pmu_read_counter(event);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+		 new_raw_count) != prev_raw_count);
+
+	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
+
+	local64_add(delta, &event->count);
+
+	return new_raw_count;
+}
+
+static void pmu_read(struct perf_event *event)
+{
+	pmu_event_update(event);
+}
+
+void pmu_event_set_period(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	/*
+	 * The CCI PMU counters have a period of 2^32. To account for the
+	 * possibility of extreme interrupt latency we program for a period of
+	 * half that. Hopefully we can handle the interrupt before another 2^31
+	 * events occur and the counter overtakes its previous value.
+	 */
+	u64 val = 1ULL << 31;
+	local64_set(&hwc->prev_count, val);
+	pmu_write_counter(event, val);
+}
+
 static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long flags;
-	struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
-	struct pmu_hw_events *events = cci_pmu->get_hw_events();
-	struct perf_sample_data data;
-	struct pt_regs *regs;
+	struct cci_pmu *cci_pmu = dev;
+	struct cci_pmu_hw_events *events = &pmu->hw_events;
 	int idx, handled = IRQ_NONE;
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
-	regs = get_irq_regs();
 	/*
 	 * Iterate over counters and update the corresponding perf events.
 	 * This should work regardless of whether we have per-counter overflow
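
Note: pmu_event_update() above is the usual lock-free read-and-accumulate loop: it snapshots prev_count, rereads the hardware counter, and retries the cmpxchg until no other updater raced with it; the delta is then folded into event->count with 32-bit modular arithmetic, so a counter that wrapped past zero still yields the right event count. A small stand-alone check of the wraparound property (plain C, independent of the driver):

    #include <assert.h>
    #include <stdint.h>

    #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)

    int main(void)
    {
            /* No wrap: the delta is the plain difference. */
            uint64_t prev = 100, now = 250;
            assert(((now - prev) & CCI_PMU_CNTR_MASK) == 150);

            /* Wrap: the counter ran from 0xfffffff0 past zero to 0x10,
             * i.e. 0x20 events, and the mask recovers exactly that. */
            prev = 0xfffffff0ULL;
            now = 0x10ULL;
            assert(((now - prev) & CCI_PMU_CNTR_MASK) == 0x20);

            return 0;
    }
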
@@ -403,154 +485,407 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
 
 		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
 
+		pmu_event_update(event);
+		pmu_event_set_period(event);
 		handled = IRQ_HANDLED;
-
-		armpmu_event_update(event);
-		perf_sample_data_init(&data, 0, hw_counter->last_period);
-		if (!armpmu_event_set_period(event))
-			continue;
-
-		if (perf_event_overflow(event, &data, regs))
-			cci_pmu->disable(event);
 	}
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 
 	return IRQ_RETVAL(handled);
 }
 
-static void pmu_free_irq(struct arm_pmu *cci_pmu)
+static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
 {
-	int i;
+	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
+	if (ret) {
+		pmu_free_irq(cci_pmu);
+		return ret;
+	}
+	return 0;
+}
 
-	for (i = 0; i < pmu->nr_irqs; i++) {
-		if (!test_and_clear_bit(i, &pmu->active_irqs))
-			continue;
+static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
+{
+	pmu_free_irq(cci_pmu);
+}
 
-		free_irq(pmu->irqs[i], cci_pmu);
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	atomic_t *active_events = &cci_pmu->active_events;
+	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
+		cci_pmu_put_hw(cci_pmu);
+		mutex_unlock(reserve_mutex);
 	}
 }
 
-static void pmu_enable_event(struct perf_event *event)
+static void cci_pmu_enable(struct pmu *pmu)
 {
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
 	unsigned long flags;
-	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = cci_pmu->get_hw_events();
-	struct hw_perf_event *hw_counter = &event->hw;
-	int idx = hw_counter->idx;
+	u32 val;
+
+	if (!enabled)
+		return;
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+
+	/* Enable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+
+}
+
+static void cci_pmu_disable(struct pmu *pmu)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	unsigned long flags;
+	u32 val;
+
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+
+	/* Disable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_start(struct perf_event *event, int pmu_flags)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	unsigned long flags;
+
+	/*
+	 * To handle interrupt latency, we always reprogram the period
+	 * regardless of PERF_EF_RELOAD.
+	 */
+	if (pmu_flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
 
 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
 		return;
 	}
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
 
 	/* Configure the event to count, unless you are counting cycles */
 	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
-		pmu_set_event(idx, hw_counter->config_base);
+		pmu_set_event(idx, hwc->config_base);
 
+	pmu_event_set_period(event);
 	pmu_enable_counter(idx);
 
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
 }
 
-static void pmu_disable_event(struct perf_event *event)
+static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
 {
-	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hw_counter = &event->hw;
-	int idx = hw_counter->idx;
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (hwc->state & PERF_HES_STOPPED)
+		return;
 
 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
 		return;
 	}
 
+	/*
+	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
+	 * cci_pmu_start()
+	 */
 	pmu_disable_counter(idx);
+	pmu_event_update(event);
+	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 }
 
-static void pmu_start(struct arm_pmu *cci_pmu)
+static int cci_pmu_add(struct perf_event *event, int flags)
 {
-	u32 val;
-	unsigned long flags;
-	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	perf_pmu_disable(event->pmu);
 
-	/* Enable all the PMU counters. */
-	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
-	writel(val, cci_ctrl_base + CCI_PMCR);
+	/* If we don't have a space for the counter then finish early. */
+	idx = pmu_get_event_idx(hw_events, event);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
 
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	event->hw.idx = idx;
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		cci_pmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
 }
 
-static void pmu_stop(struct arm_pmu *cci_pmu)
+static void cci_pmu_del(struct perf_event *event, int flags)
 {
-	u32 val;
-	unsigned long flags;
-	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	cci_pmu_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
 
-	/* Disable all the PMU counters. */
-	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
-	writel(val, cci_ctrl_base + CCI_PMCR);
+	perf_event_update_userpage(event);
+}
 
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+static int
+validate_event(struct cci_pmu_hw_events *hw_events,
+	       struct perf_event *event)
+{
+	if (is_software_event(event))
+		return 1;
+
+	if (event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	return pmu_get_event_idx(hw_events, event) >= 0;
 }
 
-static u32 pmu_read_counter(struct perf_event *event)
+static int
+validate_group(struct perf_event *event)
 {
-	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hw_counter = &event->hw;
-	int idx = hw_counter->idx;
-	u32 value;
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct cci_pmu_hw_events fake_pmu = {
+		/*
+		 * Initialise the fake PMU. We only need to populate the
+		 * used_mask for the purposes of validation.
+		 */
+		.used_mask = CPU_BITS_NONE,
+	};
 
-	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
-		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
-		return 0;
+	if (!validate_event(&fake_pmu, leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(&fake_pmu, sibling))
+			return -EINVAL;
 	}
-	value = pmu_read_register(idx, CCI_PMU_CNTR);
 
-	return value;
+	if (!validate_event(&fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
 }
 
-static void pmu_write_counter(struct perf_event *event, u32 value)
+static int
+__hw_perf_event_init(struct perf_event *event)
 {
-	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hw_counter = &event->hw;
-	int idx = hw_counter->idx;
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping;
 
-	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
-		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
-	else
-		pmu_write_register(value, idx, CCI_PMU_CNTR);
+	mapping = pmu_map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet.
+	 */
+	hwc->idx = -1;
+	hwc->config_base = 0;
+	hwc->config = 0;
+	hwc->event_base = 0;
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base |= (unsigned long)mapping;
+
+	/*
+	 * Limit the sample_period to half of the counter width. That way, the
+	 * new counter value is far less likely to overtake the previous one
+	 * unless you have some serious IRQ latency issues.
+	 */
+	hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
+	hwc->last_period = hwc->sample_period;
+	local64_set(&hwc->period_left, hwc->sample_period);
+
+	if (event->group_leader != event) {
+		if (validate_group(event) != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cci_pmu_event_init(struct perf_event *event)
+{
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+	atomic_t *active_events = &cci_pmu->active_events;
+	int err = 0;
+	int cpu;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/* Shared by all CPUs, no meaningful state to sample */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EOPNOTSUPP;
+
+	/* We have no filtering of any kind */
+	if (event->attr.exclude_user	||
+	    event->attr.exclude_kernel	||
+	    event->attr.exclude_hv	||
+	    event->attr.exclude_idle	||
+	    event->attr.exclude_host	||
+	    event->attr.exclude_guest)
+		return -EINVAL;
+
+	/*
+	 * Following the example set by other "uncore" PMUs, we accept any CPU
+	 * and rewrite its affinity dynamically rather than having perf core
+	 * handle cpu == -1 and pid == -1 for this case.
+	 *
+	 * The perf core will pin online CPUs for the duration of this call and
+	 * the event being installed into its context, so the PMU's CPU can't
+	 * change under our feet.
+	 */
+	cpu = cpumask_first(&cci_pmu->cpus);
+	if (event->cpu < 0 || cpu < 0)
+		return -EINVAL;
+	event->cpu = cpu;
+
+	event->destroy = hw_perf_event_destroy;
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&cci_pmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = cci_pmu_get_hw(cci_pmu);
+		if (!err)
+			atomic_inc(active_events);
+		mutex_unlock(&cci_pmu->reserve_mutex);
+	}
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static ssize_t pmu_attr_cpumask_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &pmu->cpus);
+
+	buf[n++] = '\n';
+	buf[n] = '\0';
+	return n;
 }
 
-static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);
+
+static struct attribute *pmu_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static struct attribute_group pmu_attr_group = {
+	.attrs = pmu_attrs,
+};
+
+static const struct attribute_group *pmu_attr_groups[] = {
+	&pmu_attr_group,
+	NULL
+};
+
+static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 {
-	*cci_pmu = (struct arm_pmu){
-		.name             = pmu_names[probe_cci_revision()],
-		.max_period       = (1LLU << 32) - 1,
-		.get_hw_events    = pmu_get_hw_events,
-		.get_event_idx    = pmu_get_event_idx,
-		.map_event        = pmu_map_event,
-		.request_irq      = pmu_request_irq,
-		.handle_irq       = pmu_handle_irq,
-		.free_irq         = pmu_free_irq,
-		.enable           = pmu_enable_event,
-		.disable          = pmu_disable_event,
-		.start            = pmu_start,
-		.stop             = pmu_stop,
-		.read_counter     = pmu_read_counter,
-		.write_counter    = pmu_write_counter,
+	char *name = pmu_names[probe_cci_revision()];
+	cci_pmu->pmu = (struct pmu) {
+		.name		= pmu_names[probe_cci_revision()],
+		.task_ctx_nr	= perf_invalid_context,
+		.pmu_enable	= cci_pmu_enable,
+		.pmu_disable	= cci_pmu_disable,
+		.event_init	= cci_pmu_event_init,
+		.add		= cci_pmu_add,
+		.del		= cci_pmu_del,
+		.start		= cci_pmu_start,
+		.stop		= cci_pmu_stop,
+		.read		= pmu_read,
+		.attr_groups	= pmu_attr_groups,
 	};
 
 	cci_pmu->plat_device = pdev;
 	cci_pmu->num_events = pmu_get_max_counters();
 
-	return armpmu_register(cci_pmu, -1);
+	return perf_pmu_register(&cci_pmu->pmu, name, -1);
+}
+
+static int cci_pmu_cpu_notifier(struct notifier_block *self,
+				unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+	unsigned int target;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
+			break;
+		target = cpumask_any_but(cpu_online_mask, cpu);
+		if (target >= nr_cpu_ids) /* UP, last CPU */
+			break;
+		/*
+		 * TODO: migrate context once core races on event->ctx have
+		 * been fixed.
+		 */
+		cpumask_set_cpu(target, &pmu->cpus);
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
 }
 
+static struct notifier_block cci_pmu_cpu_nb = {
+	.notifier_call	= cci_pmu_cpu_notifier,
+	/*
+	 * to migrate uncore events, our notifier should be executed
+	 * before perf core's notifier.
+	 */
+	.priority	= CPU_PRI_PERF + 1,
+};
+
 static const struct of_device_id arm_cci_pmu_matches[] = {
 	{
 		.compatible = "arm,cci-400-pmu",
@@ -604,15 +939,16 @@ static int cci_pmu_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
-	if (!pmu->cci_pmu)
-		return -ENOMEM;
-
-	pmu->hw_events.events = pmu->events;
-	pmu->hw_events.used_mask = pmu->used_mask;
 	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+	mutex_init(&pmu->reserve_mutex);
+	atomic_set(&pmu->active_events, 0);
+	cpumask_set_cpu(smp_processor_id(), &pmu->cpus);
 
-	ret = cci_pmu_init(pmu->cci_pmu, pdev);
+	ret = register_cpu_notifier(&cci_pmu_cpu_nb);
+	if (ret)
+		return ret;
+
+	ret = cci_pmu_init(pmu, pdev);
 	if (ret)
 		return ret;
 
@@ -976,6 +1312,9 @@ static int cci_probe(void)
 	if (!np)
 		return -ENODEV;
 
+	if (!of_device_is_available(np))
+		return -ENODEV;
+
 	cci_config = of_match_node(arm_cci_matches, np)->data;
 	if (!cci_config)
 		return -ENODEV;
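
Note: after this conversion the CCI PMU is an ordinary system-wide perf event source; perf core assigns it a dynamic type id, published in sysfs next to the cpumask attribute added above. A hedged sketch of opening one of its counters from userspace follows; the sysfs path component CCI_400 matches pmu_names[], but the event code 0x63 is purely an illustrative assumption:

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int type, fd;

            /* perf core publishes the dynamic PMU type id in sysfs */
            FILE *f = fopen("/sys/bus/event_source/devices/CCI_400/type", "r");
            if (!f || fscanf(f, "%d", &type) != 1)
                    return 1;
            fclose(f);

            memset(&attr, 0, sizeof(attr));
            attr.type = type;
            attr.size = sizeof(attr);
            attr.config = 0x63;     /* hypothetical CCI event code */

            /* Uncore counting event: pid is -1 and a CPU must be named;
             * the driver rewrites the CPU to its own chosen one anyway. */
            fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0)
                    return 1;

            sleep(1);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("events: %llu\n", (unsigned long long)count);
            return 0;
    }
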
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f2cd6a2d40b4..738612c45266 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -23,35 +23,103 @@
 #include <linux/list.h>
 #include <linux/of.h>
 #include <linux/bitops.h>
+#include <linux/pm.h>
 
+#ifdef CONFIG_ARM
 #include <asm/bug.h>
 #include <asm/signal.h>
+#endif
 
-#define ARB_TIMER			0x008
-#define ARB_ERR_CAP_CLR			0x7e4
 #define ARB_ERR_CAP_CLEAR		(1 << 0)
-#define ARB_ERR_CAP_HI_ADDR		0x7e8
-#define ARB_ERR_CAP_ADDR		0x7ec
-#define ARB_ERR_CAP_DATA		0x7f0
-#define ARB_ERR_CAP_STATUS		0x7f4
 #define ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
 #define ARB_ERR_CAP_STATUS_TEA		(1 << 11)
 #define ARB_ERR_CAP_STATUS_BS_SHIFT	(1 << 2)
 #define ARB_ERR_CAP_STATUS_BS_MASK	0x3c
 #define ARB_ERR_CAP_STATUS_WRITE	(1 << 1)
 #define ARB_ERR_CAP_STATUS_VALID	(1 << 0)
-#define ARB_ERR_CAP_MASTER		0x7f8
+
+enum {
+	ARB_TIMER,
+	ARB_ERR_CAP_CLR,
+	ARB_ERR_CAP_HI_ADDR,
+	ARB_ERR_CAP_ADDR,
+	ARB_ERR_CAP_DATA,
+	ARB_ERR_CAP_STATUS,
+	ARB_ERR_CAP_MASTER,
+};
+
+static const int gisb_offsets_bcm7038[] = {
+	[ARB_TIMER]		= 0x00c,
+	[ARB_ERR_CAP_CLR]	= 0x0c4,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x0c8,
+	[ARB_ERR_CAP_DATA]	= 0x0cc,
+	[ARB_ERR_CAP_STATUS]	= 0x0d0,
+	[ARB_ERR_CAP_MASTER]	= -1,
+};
+
+static const int gisb_offsets_bcm7400[] = {
+	[ARB_TIMER]		= 0x00c,
+	[ARB_ERR_CAP_CLR]	= 0x0c8,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x0cc,
+	[ARB_ERR_CAP_DATA]	= 0x0d0,
+	[ARB_ERR_CAP_STATUS]	= 0x0d4,
+	[ARB_ERR_CAP_MASTER]	= 0x0d8,
+};
+
+static const int gisb_offsets_bcm7435[] = {
+	[ARB_TIMER]		= 0x00c,
+	[ARB_ERR_CAP_CLR]	= 0x168,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x16c,
+	[ARB_ERR_CAP_DATA]	= 0x170,
+	[ARB_ERR_CAP_STATUS]	= 0x174,
+	[ARB_ERR_CAP_MASTER]	= 0x178,
+};
+
+static const int gisb_offsets_bcm7445[] = {
+	[ARB_TIMER]		= 0x008,
+	[ARB_ERR_CAP_CLR]	= 0x7e4,
+	[ARB_ERR_CAP_HI_ADDR]	= 0x7e8,
+	[ARB_ERR_CAP_ADDR]	= 0x7ec,
+	[ARB_ERR_CAP_DATA]	= 0x7f0,
+	[ARB_ERR_CAP_STATUS]	= 0x7f4,
+	[ARB_ERR_CAP_MASTER]	= 0x7f8,
+};
 
 struct brcmstb_gisb_arb_device {
 	void __iomem	*base;
+	const int	*gisb_offsets;
 	struct mutex	lock;
 	struct list_head next;
 	u32 valid_mask;
 	const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+	u32 saved_timeout;
 };
 
 static LIST_HEAD(brcmstb_gisb_arb_device_list);
 
+static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
+{
+	int offset = gdev->gisb_offsets[reg];
+
+	/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+	if (offset == -1)
+		return 1;
+
+	return ioread32(gdev->base + offset);
+}
+
+static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
+{
+	int offset = gdev->gisb_offsets[reg];
+
+	if (offset == -1)
+		return;
+	iowrite32(val, gdev->base + offset);
+}
+
 static ssize_t gisb_arb_get_timeout(struct device *dev,
 				    struct device_attribute *attr,
 				    char *buf)
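
Note: the enum-indexed offset tables let one driver serve several STB generations whose arbiter registers live at different offsets, with -1 marking a register a given chip simply does not have; gisb_read()/gisb_write() funnel every access through that table. A stand-alone sketch of the same pattern (the names and offsets here are illustrative):

    #include <stdint.h>

    enum { REG_TIMER, REG_STATUS, REG_MAX };

    /* One table per chip generation; -1 means the register is absent. */
    static const int chip_a[REG_MAX] = { [REG_TIMER] = 0x00c, [REG_STATUS] = -1 };
    static const int chip_b[REG_MAX] = { [REG_TIMER] = 0x008, [REG_STATUS] = 0x7f4 };

    static uint32_t reg_read(volatile uint8_t *base, const int *offsets, int reg)
    {
            int offset = offsets[reg];

            if (offset == -1)       /* absent register: benign default */
                    return 0;
            return *(volatile uint32_t *)(base + offset);
    }
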
@@ -61,7 +129,7 @@ static ssize_t gisb_arb_get_timeout(struct device *dev,
 	u32 timeout;
 
 	mutex_lock(&gdev->lock);
-	timeout = ioread32(gdev->base + ARB_TIMER);
+	timeout = gisb_read(gdev, ARB_TIMER);
 	mutex_unlock(&gdev->lock);
 
 	return sprintf(buf, "%d", timeout);
@@ -83,7 +151,7 @@ static ssize_t gisb_arb_set_timeout(struct device *dev,
 		return -EINVAL;
 
 	mutex_lock(&gdev->lock);
-	iowrite32(val, gdev->base + ARB_TIMER);
+	gisb_write(gdev, val, ARB_TIMER);
 	mutex_unlock(&gdev->lock);
 
 	return count;
@@ -110,18 +178,18 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
 	const char *m_name;
 	char m_fmt[11];
 
-	cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS);
+	cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
 
 	/* Invalid captured address, bail out */
 	if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
 		return 1;
 
 	/* Read the address and master */
-	arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff;
+	arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
 #if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
-	arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32;
+	arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
 #endif
-	master = ioread32(gdev->base + ARB_ERR_CAP_MASTER);
+	master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
 
 	m_name = brcmstb_gisb_master_to_str(gdev, master);
 	if (!m_name) {
@@ -136,11 +204,12 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
 		m_name);
 
 	/* clear the GISB error */
-	iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR);
+	gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
 
 	return 0;
 }
 
+#ifdef CONFIG_ARM
 static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
 				     struct pt_regs *regs)
 {
@@ -159,12 +228,7 @@ static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
 
 	return ret;
 }
-
-void __init brcmstb_hook_fault_code(void)
-{
-	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
-			"imprecise external abort");
-}
+#endif
 
 static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
 {
@@ -192,10 +256,20 @@ static struct attribute_group gisb_arb_sysfs_attr_group = {
 	.attrs = gisb_arb_sysfs_attrs,
 };
 
-static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+	{ .compatible = "brcm,gisb-arb",         .data = gisb_offsets_bcm7445 },
+	{ .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
+	{ .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
+	{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
+	{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+	{ },
+};
+
+static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
 {
 	struct device_node *dn = pdev->dev.of_node;
 	struct brcmstb_gisb_arb_device *gdev;
+	const struct of_device_id *of_id;
 	struct resource *r;
 	int err, timeout_irq, tea_irq;
 	unsigned int num_masters, j = 0;
@@ -216,6 +290,13 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
 	if (IS_ERR(gdev->base))
 		return PTR_ERR(gdev->base);
 
+	of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
+	if (!of_id) {
+		pr_err("failed to look up compatible string\n");
+		return -EINVAL;
+	}
+	gdev->gisb_offsets = of_id->data;
+
 	err = devm_request_irq(&pdev->dev, timeout_irq,
 			       brcmstb_gisb_timeout_handler, 0, pdev->name,
 			       gdev);
@@ -261,29 +342,62 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
 
 	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
 
+#ifdef CONFIG_ARM
+	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
+			"imprecise external abort");
+#endif
+
 	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
 		 gdev->base, timeout_irq, tea_irq);
 
 	return 0;
 }
 
-static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
-	{ .compatible = "brcm,gisb-arb" },
-	{ },
+#ifdef CONFIG_PM_SLEEP
+static int brcmstb_gisb_arb_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+
+	gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
+
+	return 0;
+}
+
+/* Make sure we provide the same timeout value that was configured before, and
+ * do this before the GISB timeout interrupt handler has any chance to run.
+ */
+static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+
+	gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
+
+	return 0;
+}
+#else
+#define brcmstb_gisb_arb_suspend	NULL
+#define brcmstb_gisb_arb_resume_noirq	NULL
+#endif
+
+static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
+	.suspend	= brcmstb_gisb_arb_suspend,
+	.resume_noirq	= brcmstb_gisb_arb_resume_noirq,
 };
 
 static struct platform_driver brcmstb_gisb_arb_driver = {
-	.probe	= brcmstb_gisb_arb_probe,
 	.driver = {
 		.name	= "brcm-gisb-arb",
-		.owner	= THIS_MODULE,
 		.of_match_table = brcmstb_gisb_arb_of_match,
+		.pm	= &brcmstb_gisb_arb_pm_ops,
 	},
 };
 
 static int __init brcm_gisb_driver_init(void)
 {
-	return platform_driver_register(&brcmstb_gisb_arb_driver);
+	return platform_driver_probe(&brcmstb_gisb_arb_driver,
+				     brcmstb_gisb_arb_probe);
 }
 
 module_init(brcm_gisb_driver_init);
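
Note: the switch from platform_driver_register() with a .probe member to platform_driver_probe() is what makes the __init annotation on the probe function safe: platform_driver_probe() binds only the devices present at call time and disables further probing, so the probe code may be discarded with the init sections. A hedged sketch of that registration shape (the driver and device names are placeholders):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init example_probe(struct platform_device *pdev)
    {
            /* one-shot setup; may live in __init because
             * platform_driver_probe() never calls it again later */
            return 0;
    }

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example-dev",  /* placeholder name */
            },
            /* note: no .probe member here */
    };

    static int __init example_init(void)
    {
            /* registers the driver and probes matching devices at once;
             * returns -ENODEV if nothing bound */
            return platform_driver_probe(&example_driver, example_probe);
    }
    module_init(example_init);
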
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 75c9681f8021..0958b6981773 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -206,7 +206,6 @@ static int __init weim_probe(struct platform_device *pdev)
 static struct platform_driver weim_driver = {
 	.driver = {
 		.name		= "imx-weim",
-		.owner		= THIS_MODULE,
 		.of_match_table	= weim_id_table,
 	},
 };
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 26c3779d871d..eb7682dc123b 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -57,6 +57,7 @@
 #include <linux/of_address.h>
 #include <linux/debugfs.h>
 #include <linux/log2.h>
+#include <linux/syscore_ops.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -94,20 +95,42 @@
 
 #define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
 
+/* Relative to mbusbridge_base */
+#define MBUS_BRIDGE_CTRL_OFF	0x0
+#define MBUS_BRIDGE_BASE_OFF	0x4
+
+/* Maximum number of windows, for all known platforms */
+#define MBUS_WINS_MAX		20
+
 struct mvebu_mbus_state;
 
 struct mvebu_mbus_soc_data {
 	unsigned int num_wins;
 	unsigned int num_remappable_wins;
+	bool has_mbus_bridge;
 	unsigned int (*win_cfg_offset)(const int win);
 	void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+	int (*save_cpu_target)(struct mvebu_mbus_state *s,
+			       u32 *store_addr);
 	int (*show_cpu_target)(struct mvebu_mbus_state *s,
 			       struct seq_file *seq, void *v);
 };
 
+/*
+ * Used to store the state of one MBus window across suspend/resume.
+ */
+struct mvebu_mbus_win_data {
+	u32 ctrl;
+	u32 base;
+	u32 remap_lo;
+	u32 remap_hi;
+};
+
 struct mvebu_mbus_state {
 	void __iomem *mbuswins_base;
 	void __iomem *sdramwins_base;
+	void __iomem *mbusbridge_base;
+	phys_addr_t sdramwins_phys_base;
 	struct dentry *debugfs_root;
 	struct dentry *debugfs_sdram;
 	struct dentry *debugfs_devs;
@@ -115,6 +138,11 @@ struct mvebu_mbus_state {
 	struct resource pcie_io_aperture;
 	const struct mvebu_mbus_soc_data *soc;
 	int hw_io_coherency;
+
+	/* Used during suspend/resume */
+	u32 mbus_bridge_ctrl;
+	u32 mbus_bridge_base;
+	struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
 };
 
 static struct mvebu_mbus_state mbus_state;
@@ -516,6 +544,28 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
 	mvebu_mbus_dram_info.num_cs = cs;
 }
 
+static int
+mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
+				   u32 *store_addr)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+		u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+		writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
+		       store_addr++);
+		writel(base, store_addr++);
+		writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
+		       store_addr++);
+		writel(size, store_addr++);
+	}
+
+	/* We've written 16 words to the store address */
+	return 16;
+}
+
 static void __init
 mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
 {
@@ -546,10 +596,35 @@ mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
 	mvebu_mbus_dram_info.num_cs = cs;
 }
 
+static int
+mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
+				u32 *store_addr)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+		writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
+		       store_addr++);
+		writel(map, store_addr++);
+	}
+
+	/* We've written 4 words to the store address */
+	return 4;
+}
+
+int mvebu_mbus_save_cpu_target(u32 *store_addr)
+{
+	return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
+}
+
 static const struct mvebu_mbus_soc_data armada_370_xp_mbus_data = {
 	.num_wins            = 20,
 	.num_remappable_wins = 8,
+	.has_mbus_bridge     = true,
 	.win_cfg_offset      = armada_370_xp_mbus_win_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
 };
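
Note the format save_cpu_target() produces: it does not stash values in a struct but streams (physical register address, register value) word pairs into a caller-supplied buffer, which is why each writel() of a value is preceded by a writel() of the register's physical address. Presumably early resume code (for example the bootloader or boot ROM on Armada 370/XP) replays that list before the kernel is re-entered; that consumer is an assumption here, but the pair layout comes straight from the code above. A sketch of replaying such a list:

    #include <stdint.h>

    /* Replay a list of (address, value) pairs, as early resume code might.
     * 'nwords' is the count returned by the save hook (16 or 4 above). */
    static void replay_register_list(const uint32_t *list, int nwords)
    {
            int i;

            for (i = 0; i < nwords; i += 2) {
                    volatile uint32_t *reg =
                            (volatile uint32_t *)(uintptr_t)list[i];

                    *reg = list[i + 1];     /* restore the saved value */
            }
    }
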
@@ -558,6 +633,7 @@ static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
 	.num_wins            = 8,
 	.num_remappable_wins = 4,
 	.win_cfg_offset      = orion_mbus_win_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
 };
@@ -566,6 +642,7 @@ static const struct mvebu_mbus_soc_data dove_mbus_data = {
 	.num_wins            = 8,
 	.num_remappable_wins = 4,
 	.win_cfg_offset      = orion_mbus_win_offset,
+	.save_cpu_target     = mvebu_mbus_dove_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_dove_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_dove,
 };
@@ -578,6 +655,7 @@ static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
 	.num_wins            = 8,
 	.num_remappable_wins = 4,
 	.win_cfg_offset      = orion_mbus_win_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
 };
@@ -586,6 +664,7 @@ static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
 	.num_wins            = 8,
 	.num_remappable_wins = 2,
 	.win_cfg_offset      = orion_mbus_win_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
 };
@@ -594,6 +673,7 @@ static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
 	.num_wins            = 14,
 	.num_remappable_wins = 8,
 	.win_cfg_offset      = mv78xx0_mbus_win_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
 };
@@ -698,11 +778,73 @@ static __init int mvebu_mbus_debugfs_init(void) | |||
698 | } | 778 | } |
699 | fs_initcall(mvebu_mbus_debugfs_init); | 779 | fs_initcall(mvebu_mbus_debugfs_init); |
700 | 780 | ||
781 | static int mvebu_mbus_suspend(void) | ||
782 | { | ||
783 | struct mvebu_mbus_state *s = &mbus_state; | ||
784 | int win; | ||
785 | |||
786 | if (!s->mbusbridge_base) | ||
787 | return -ENODEV; | ||
788 | |||
789 | for (win = 0; win < s->soc->num_wins; win++) { | ||
790 | void __iomem *addr = s->mbuswins_base + | ||
791 | s->soc->win_cfg_offset(win); | ||
792 | |||
793 | s->wins[win].base = readl(addr + WIN_BASE_OFF); | ||
794 | s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF); | ||
795 | |||
796 | if (win >= s->soc->num_remappable_wins) | ||
797 | continue; | ||
798 | |||
799 | s->wins[win].remap_lo = readl(addr + WIN_REMAP_LO_OFF); | ||
800 | s->wins[win].remap_hi = readl(addr + WIN_REMAP_HI_OFF); | ||
801 | } | ||
802 | |||
803 | s->mbus_bridge_ctrl = readl(s->mbusbridge_base + | ||
804 | MBUS_BRIDGE_CTRL_OFF); | ||
805 | s->mbus_bridge_base = readl(s->mbusbridge_base + | ||
806 | MBUS_BRIDGE_BASE_OFF); | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | static void mvebu_mbus_resume(void) | ||
812 | { | ||
813 | struct mvebu_mbus_state *s = &mbus_state; | ||
814 | int win; | ||
815 | |||
816 | writel(s->mbus_bridge_ctrl, | ||
817 | s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF); | ||
818 | writel(s->mbus_bridge_base, | ||
819 | s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF); | ||
820 | |||
821 | for (win = 0; win < s->soc->num_wins; win++) { | ||
822 | void __iomem *addr = s->mbuswins_base + | ||
823 | s->soc->win_cfg_offset(win); | ||
824 | |||
825 | writel(s->wins[win].base, addr + WIN_BASE_OFF); | ||
826 | writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF); | ||
827 | |||
828 | if (win >= s->soc->num_remappable_wins) | ||
829 | continue; | ||
830 | |||
831 | writel(s->wins[win].remap_lo, addr + WIN_REMAP_LO_OFF); | ||
832 | writel(s->wins[win].remap_hi, addr + WIN_REMAP_HI_OFF); | ||
833 | } | ||
834 | } | ||
835 | |||
836 | struct syscore_ops mvebu_mbus_syscore_ops = { | ||
837 | .suspend = mvebu_mbus_suspend, | ||
838 | .resume = mvebu_mbus_resume, | ||
839 | }; | ||
840 | |||
701 | static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, | 841 | static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, |
702 | phys_addr_t mbuswins_phys_base, | 842 | phys_addr_t mbuswins_phys_base, |
703 | size_t mbuswins_size, | 843 | size_t mbuswins_size, |
704 | phys_addr_t sdramwins_phys_base, | 844 | phys_addr_t sdramwins_phys_base, |
705 | size_t sdramwins_size) | 845 | size_t sdramwins_size, |
846 | phys_addr_t mbusbridge_phys_base, | ||
847 | size_t mbusbridge_size) | ||
706 | { | 848 | { |
707 | int win; | 849 | int win; |
708 | 850 | ||
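The suspend/resume pair above is the standard syscore save/restore idiom: suspend snapshots the window registers into driver state while the system is quiesced, and resume writes them back before devices and their interrupt handlers come back to life. A minimal sketch of the pattern, assuming a hypothetical device with a single control register (the names and the 0x0 offset are illustrative, not the mvebu-mbus layout):

#include <linux/io.h>
#include <linux/syscore_ops.h>

/* Hypothetical device state; fields are illustrative. */
static struct {
	void __iomem *regs;
	u32 saved_ctrl;
} my_dev;

static int my_dev_suspend(void)
{
	/* Snapshot registers whose contents are lost across deep sleep. */
	my_dev.saved_ctrl = readl(my_dev.regs + 0x0);
	return 0;
}

static void my_dev_resume(void)
{
	/* Restore before drivers and interrupt handlers run again. */
	writel(my_dev.saved_ctrl, my_dev.regs + 0x0);
}

static struct syscore_ops my_dev_syscore_ops = {
	.suspend = my_dev_suspend,
	.resume  = my_dev_resume,
};

Syscore callbacks run with a single CPU online and interrupts disabled, which is why the resume hook returns void and must not sleep; register_syscore_ops() hooks the pair in once at init time, as mvebu_mbus_common_init() does further down.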
@@ -716,11 +858,26 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, | |||
716 | return -ENOMEM; | 858 | return -ENOMEM; |
717 | } | 859 | } |
718 | 860 | ||
861 | mbus->sdramwins_phys_base = sdramwins_phys_base; | ||
862 | |||
863 | if (mbusbridge_phys_base) { | ||
864 | mbus->mbusbridge_base = ioremap(mbusbridge_phys_base, | ||
865 | mbusbridge_size); | ||
866 | if (!mbus->mbusbridge_base) { | ||
867 | iounmap(mbus->sdramwins_base); | ||
868 | iounmap(mbus->mbuswins_base); | ||
869 | return -ENOMEM; | ||
870 | } | ||
871 | } else | ||
872 | mbus->mbusbridge_base = NULL; | ||
873 | |||
719 | for (win = 0; win < mbus->soc->num_wins; win++) | 874 | for (win = 0; win < mbus->soc->num_wins; win++) |
720 | mvebu_mbus_disable_window(mbus, win); | 875 | mvebu_mbus_disable_window(mbus, win); |
721 | 876 | ||
722 | mbus->soc->setup_cpu_target(mbus); | 877 | mbus->soc->setup_cpu_target(mbus); |
723 | 878 | ||
879 | register_syscore_ops(&mvebu_mbus_syscore_ops); | ||
880 | |||
724 | return 0; | 881 | return 0; |
725 | } | 882 | } |
726 | 883 | ||
@@ -746,7 +903,7 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base, | |||
746 | mbuswins_phys_base, | 903 | mbuswins_phys_base, |
747 | mbuswins_size, | 904 | mbuswins_size, |
748 | sdramwins_phys_base, | 905 | sdramwins_phys_base, |
749 | sdramwins_size); | 906 | sdramwins_size, 0, 0); |
750 | } | 907 | } |
751 | 908 | ||
752 | #ifdef CONFIG_OF | 909 | #ifdef CONFIG_OF |
@@ -887,7 +1044,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np, | |||
887 | 1044 | ||
888 | int __init mvebu_mbus_dt_init(bool is_coherent) | 1045 | int __init mvebu_mbus_dt_init(bool is_coherent) |
889 | { | 1046 | { |
890 | struct resource mbuswins_res, sdramwins_res; | 1047 | struct resource mbuswins_res, sdramwins_res, mbusbridge_res; |
891 | struct device_node *np, *controller; | 1048 | struct device_node *np, *controller; |
892 | const struct of_device_id *of_id; | 1049 | const struct of_device_id *of_id; |
893 | const __be32 *prop; | 1050 | const __be32 *prop; |
@@ -923,6 +1080,19 @@ int __init mvebu_mbus_dt_init(bool is_coherent) | |||
923 | return -EINVAL; | 1080 | return -EINVAL; |
924 | } | 1081 | } |
925 | 1082 | ||
1083 | /* | ||
1084 | * Set the resource to 0 so that it can be left unmapped by | ||
1085 | * mvebu_mbus_common_init() if the DT doesn't carry the | ||
1086 | * necessary information. This is needed to preserve backward | ||
1087 | * compatibility. | ||
1088 | */ | ||
1089 | memset(&mbusbridge_res, 0, sizeof(mbusbridge_res)); | ||
1090 | |||
1091 | if (mbus_state.soc->has_mbus_bridge) { | ||
1092 | if (of_address_to_resource(controller, 2, &mbusbridge_res)) | ||
1093 | pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n"); | ||
1094 | } | ||
1095 | |||
926 | mbus_state.hw_io_coherency = is_coherent; | 1096 | mbus_state.hw_io_coherency = is_coherent; |
927 | 1097 | ||
928 | /* Get optional pcie-{mem,io}-aperture properties */ | 1098 | /* Get optional pcie-{mem,io}-aperture properties */ |
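Zeroing the resource before the optional of_address_to_resource() lookup, as the hunk above does, is a common way to keep a newly-added DT region backward compatible: an old device tree just leaves the feature disabled instead of failing the whole init. A hedged sketch of the idiom (the node, index, and helper name are illustrative):

#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/string.h>

/* Returns the start of an optional region, or 0 if the DT omits it. */
static phys_addr_t get_optional_region(struct device_node *np, int index,
				       size_t *size)
{
	struct resource res;

	memset(&res, 0, sizeof(res));
	if (of_address_to_resource(np, index, &res)) {
		/* Old DT: region absent; callers treat 0 as "do not map". */
		*size = 0;
		return 0;
	}

	*size = resource_size(&res);
	return res.start;
}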
@@ -933,7 +1103,9 @@ int __init mvebu_mbus_dt_init(bool is_coherent) | |||
933 | mbuswins_res.start, | 1103 | mbuswins_res.start, |
934 | resource_size(&mbuswins_res), | 1104 | resource_size(&mbuswins_res), |
935 | sdramwins_res.start, | 1105 | sdramwins_res.start, |
936 | resource_size(&sdramwins_res)); | 1106 | resource_size(&sdramwins_res), |
1107 | mbusbridge_res.start, | ||
1108 | resource_size(&mbusbridge_res)); | ||
937 | if (ret) | 1109 | if (ret) |
938 | return ret; | 1110 | return ret; |
939 | 1111 | ||
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index 5511f9814ddd..723ec06ad2c8 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c | |||
@@ -77,7 +77,6 @@ static struct platform_driver omap_ocp2scp_driver = { | |||
77 | .remove = omap_ocp2scp_remove, | 77 | .remove = omap_ocp2scp_remove, |
78 | .driver = { | 78 | .driver = { |
79 | .name = "omap-ocp2scp", | 79 | .name = "omap-ocp2scp", |
80 | .owner = THIS_MODULE, | ||
81 | .of_match_table = of_match_ptr(omap_ocp2scp_id_table), | 80 | .of_match_table = of_match_ptr(omap_ocp2scp_id_table), |
82 | }, | 81 | }, |
83 | }; | 82 | }; |
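Dropping the explicit .owner = THIS_MODULE assignment is safe because the platform core fills it in on the driver's behalf: platform_driver_register() became a macro that passes THIS_MODULE down to __platform_driver_register(). Roughly as sketched below (paraphrased, not the verbatim kernel source; the exact body varies by version):

/* In <linux/platform_device.h>, approximately: */
#define platform_driver_register(drv) \
	__platform_driver_register(drv, THIS_MODULE)

/* In drivers/base/platform.c, approximately: */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;	/* recorded for the calling module */
	drv->driver.bus = &platform_bus_type;
	/* ... probe/remove/shutdown shims ... */
	return driver_register(&drv->driver);
}

The omap_l3_noc hunk below sheds its .owner line for the same reason.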
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index 531ae591783b..029bc73de001 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c | |||
@@ -222,10 +222,14 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | /* Error found so break the for loop */ | 224 | /* Error found so break the for loop */ |
225 | break; | 225 | return IRQ_HANDLED; |
226 | } | 226 | } |
227 | } | 227 | } |
228 | return IRQ_HANDLED; | 228 | |
229 | dev_err(l3->dev, "L3 %s IRQ not handled!!\n", | ||
230 | inttype ? "debug" : "application"); | ||
231 | |||
232 | return IRQ_NONE; | ||
229 | } | 233 | } |
230 | 234 | ||
231 | static const struct of_device_id l3_noc_match[] = { | 235 | static const struct of_device_id l3_noc_match[] = { |
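Returning IRQ_NONE when no status bit matched, as the reworked handler above now does, feeds the kernel's spurious-interrupt accounting: on a shared line the core moves on to the next handler, and a line whose handlers keep reporting "not mine" gets logged and can be disabled. A minimal sketch of the contract, with hypothetical register offsets:

#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t my_isr(int irq, void *data)
{
	void __iomem *regs = data;
	u32 status = readl(regs + 0x10);	/* hypothetical status reg */

	if (!status)
		return IRQ_NONE;	/* not ours; lets the core spot bad lines */

	writel(status, regs + 0x14);		/* hypothetical ack reg */
	return IRQ_HANDLED;
}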
@@ -296,11 +300,65 @@ static int omap_l3_probe(struct platform_device *pdev) | |||
296 | return ret; | 300 | return ret; |
297 | } | 301 | } |
298 | 302 | ||
303 | #ifdef CONFIG_PM | ||
304 | |||
305 | /** | ||
306 | * l3_resume_noirq() - resume function for l3_noc | ||
307 | * @dev: pointer to l3_noc device structure | ||
308 | * | ||
309 | * Only a resume handler is needed, since the delta | ||
310 | * register configuration is already maintained as | ||
311 | * part of configuring the system | ||
312 | */ | ||
313 | static int l3_resume_noirq(struct device *dev) | ||
314 | { | ||
315 | struct omap_l3 *l3 = dev_get_drvdata(dev); | ||
316 | int i; | ||
317 | struct l3_flagmux_data *flag_mux; | ||
318 | void __iomem *base, *mask_regx = NULL; | ||
319 | u32 mask_val; | ||
320 | |||
321 | for (i = 0; i < l3->num_modules; i++) { | ||
322 | base = l3->l3_base[i]; | ||
323 | flag_mux = l3->l3_flagmux[i]; | ||
324 | if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits) | ||
325 | continue; | ||
326 | |||
327 | mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 + | ||
328 | (L3_APPLICATION_ERROR << 3); | ||
329 | mask_val = readl_relaxed(mask_regx); | ||
330 | mask_val &= ~(flag_mux->mask_app_bits); | ||
331 | |||
332 | writel_relaxed(mask_val, mask_regx); | ||
333 | mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 + | ||
334 | (L3_DEBUG_ERROR << 3); | ||
335 | mask_val = readl_relaxed(mask_regx); | ||
336 | mask_val &= ~(flag_mux->mask_dbg_bits); | ||
337 | |||
338 | writel_relaxed(mask_val, mask_regx); | ||
339 | } | ||
340 | |||
341 | /* Dummy read to force OCP barrier */ | ||
342 | if (mask_regx) | ||
343 | (void)readl(mask_regx); | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static const struct dev_pm_ops l3_dev_pm_ops = { | ||
349 | .resume_noirq = l3_resume_noirq, | ||
350 | }; | ||
351 | |||
352 | #define L3_DEV_PM_OPS (&l3_dev_pm_ops) | ||
353 | #else | ||
354 | #define L3_DEV_PM_OPS NULL | ||
355 | #endif | ||
356 | |||
299 | static struct platform_driver omap_l3_driver = { | 357 | static struct platform_driver omap_l3_driver = { |
300 | .probe = omap_l3_probe, | 358 | .probe = omap_l3_probe, |
301 | .driver = { | 359 | .driver = { |
302 | .name = "omap_l3_noc", | 360 | .name = "omap_l3_noc", |
303 | .owner = THIS_MODULE, | 361 | .pm = L3_DEV_PM_OPS, |
304 | .of_match_table = of_match_ptr(l3_noc_match), | 362 | .of_match_table = of_match_ptr(l3_noc_match), |
305 | }, | 363 | }, |
306 | }; | 364 | }; |
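The #ifdef CONFIG_PM block ending in an L3_DEV_PM_OPS fallback macro is the stock pattern for compiling PM callbacks out entirely when power management is disabled, so the driver struct can reference one name either way. The same shape for a hypothetical driver (names are illustrative):

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int my_resume_noirq(struct device *dev)
{
	/* Reprogram registers lost over suspend, before IRQs re-enable. */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	.resume_noirq = my_resume_noirq,
};
#define MY_PM_OPS (&my_pm_ops)
#else
#define MY_PM_OPS NULL
#endif

static int my_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.driver = {
		.name = "my-driver",
		.pm = MY_PM_OPS,
	},
};

Hooking resume_noirq rather than resume ensures the error masks are restored before any interrupt handler can fire on the freshly-resumed hardware.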
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c index acc216491b8a..597fdaee7315 100644 --- a/drivers/bus/omap_l3_smx.c +++ b/drivers/bus/omap_l3_smx.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/module.h> | ||
31 | #include <linux/of.h> | ||
32 | #include <linux/of_device.h> | ||
33 | |||
30 | #include "omap_l3_smx.h" | 34 | #include "omap_l3_smx.h" |
31 | 35 | ||
32 | static inline u64 omap3_l3_readll(void __iomem *base, u16 reg) | 36 | static inline u64 omap3_l3_readll(void __iomem *base, u16 reg) |
@@ -211,7 +215,17 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) | |||
211 | return ret; | 215 | return ret; |
212 | } | 216 | } |
213 | 217 | ||
214 | static int __init omap3_l3_probe(struct platform_device *pdev) | 218 | #if IS_BUILTIN(CONFIG_OF) |
219 | static const struct of_device_id omap3_l3_match[] = { | ||
220 | { | ||
221 | .compatible = "ti,omap3-l3-smx", | ||
222 | }, | ||
223 | { }, | ||
224 | }; | ||
225 | MODULE_DEVICE_TABLE(of, omap3_l3_match); | ||
226 | #endif | ||
227 | |||
228 | static int omap3_l3_probe(struct platform_device *pdev) | ||
215 | { | 229 | { |
216 | struct omap3_l3 *l3; | 230 | struct omap3_l3 *l3; |
217 | struct resource *res; | 231 | struct resource *res; |
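The new match table is the usual OF-binding boilerplate: a sentinel-terminated array of compatibles, exported for module autoloading, and referenced via of_match_ptr() so it compiles away without CONFIG_OF. Sketched with a hypothetical compatible string (the real binding above is "ti,omap3-l3-smx"):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,my-device" },	/* hypothetical binding */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, my_match);

/* Wired into the platform_driver as:
 *	.of_match_table = of_match_ptr(my_match),
 */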
@@ -265,7 +279,7 @@ err0: | |||
265 | return ret; | 279 | return ret; |
266 | } | 280 | } |
267 | 281 | ||
268 | static int __exit omap3_l3_remove(struct platform_device *pdev) | 282 | static int omap3_l3_remove(struct platform_device *pdev) |
269 | { | 283 | { |
270 | struct omap3_l3 *l3 = platform_get_drvdata(pdev); | 284 | struct omap3_l3 *l3 = platform_get_drvdata(pdev); |
271 | 285 | ||
@@ -278,15 +292,17 @@ static int __exit omap3_l3_remove(struct platform_device *pdev) | |||
278 | } | 292 | } |
279 | 293 | ||
280 | static struct platform_driver omap3_l3_driver = { | 294 | static struct platform_driver omap3_l3_driver = { |
281 | .remove = __exit_p(omap3_l3_remove), | 295 | .probe = omap3_l3_probe, |
296 | .remove = omap3_l3_remove, | ||
282 | .driver = { | 297 | .driver = { |
283 | .name = "omap_l3_smx", | 298 | .name = "omap_l3_smx", |
299 | .of_match_table = of_match_ptr(omap3_l3_match), | ||
284 | }, | 300 | }, |
285 | }; | 301 | }; |
286 | 302 | ||
287 | static int __init omap3_l3_init(void) | 303 | static int __init omap3_l3_init(void) |
288 | { | 304 | { |
289 | return platform_driver_probe(&omap3_l3_driver, omap3_l3_probe); | 305 | return platform_driver_register(&omap3_l3_driver); |
290 | } | 306 | } |
291 | postcore_initcall_sync(omap3_l3_init); | 307 | postcore_initcall_sync(omap3_l3_init); |
292 | 308 | ||
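The closing hunks convert the driver from platform_driver_probe() to platform_driver_register(), which is what makes the new match table useful: platform_driver_probe() binds only devices that already exist at registration time and then discards the probe path, so a DT-created or deferred device would never bind, and a probe routine still marked __init would have been freed by the time it ran. A sketch of the resulting shape, with hypothetical names:

#include <linux/init.h>
#include <linux/platform_device.h>

/* No __init: probe may run long after init memory is released,
 * e.g. on deferred probe or late device creation. */
static int my_probe(struct platform_device *pdev)
{
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver my_driver = {
	.probe  = my_probe,
	.remove = my_remove,
	.driver = {
		.name = "my-driver",
	},
};

static int __init my_init(void)
{
	/* Keeps the driver registered for devices that appear later;
	 * platform_driver_probe() would have given up after one pass. */
	return platform_driver_register(&my_driver);
}
postcore_initcall_sync(my_init);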