author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 17:48:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-09 17:48:22 -0500
commit		3a647c1d7ab08145cee4b650f5e797d168846c51 (patch)
tree		6fcbc8ad1fc69b5a99214e22f6084452bdf0131c /drivers/bus
parent		6cd94d5e57ab97ddd672b707ab4bb639672c1727 (diff)
parent		5db45002576f7d60c5bf7b23e277845cd3e806be (diff)
Merge tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "These are changes for drivers that are intimately tied to some SoC and
  for some reason could not get merged through the respective subsystem
  maintainer tree.

  The largest single change here this time around is the Tegra
  iommu/memory controller driver, which gets updated to the new iommu DT
  binding.  More drivers like this are likely to follow for the following
  merge window, but we should be able to do those through the iommu
  maintainer.

  Other notable changes are:
   - reset controller drivers from the reset maintainer (socfpga, sti, berlin)
   - fixes for the keystone navigator driver merged last time
   - at91 rtc driver changes related to the at91 cleanups
   - ARM perf driver changes from Will Deacon
   - updates for the brcmstb_gisb driver"

* tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (53 commits)
  clocksource: arch_timer: Allow the device tree to specify uninitialized timer registers
  clocksource: arch_timer: Fix code to use physical timers when requested
  memory: Add NVIDIA Tegra memory controller support
  bus: brcmstb_gisb: Add register offset tables for older chips
  bus: brcmstb_gisb: Look up register offsets in a table
  bus: brcmstb_gisb: Introduce wrapper functions for MMIO accesses
  bus: brcmstb_gisb: Make the driver buildable on MIPS
  of: Add NVIDIA Tegra memory controller binding
  ARM: tegra: Move AHB Kconfig to drivers/amba
  amba: Add Kconfig file
  clk: tegra: Implement memory-controller clock
  serial: samsung: Fix serial config dependencies for exynos7
  bus: brcmstb_gisb: resolve section mismatch
  ARM: common: edma: edma_pm_resume may be unused
  ARM: common: edma: add suspend resume hook
  powerpc/iommu: Rename iommu_[un]map_sg functions
  rtc: at91sam9: add DT bindings documentation
  rtc: at91sam9: use clk API instead of relying on AT91_SLOW_CLOCK
  ARM: at91: add clk_lookup entry for RTT devices
  rtc: at91sam9: rework the Kconfig description
  ...
Diffstat (limited to 'drivers/bus')
-rw-r--r--	drivers/bus/Kconfig		2
-rw-r--r--	drivers/bus/arm-cci.c		552
-rw-r--r--	drivers/bus/brcmstb_gisb.c	130
-rw-r--r--	drivers/bus/omap_l3_noc.c	63
4 files changed, 612 insertions, 135 deletions
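
Note on the arm-cci.c changes below: the CCI-400 PMU code moves off the arm_pmu framework and registers directly with the perf core as a standalone "uncore" PMU. For orientation, here is a minimal sketch of that registration pattern; the example_* names and the empty callback bodies are illustrative only and are not taken from the commit:

#include <linux/perf_event.h>

/* Illustrative stubs; a real driver programs counters and hardware state here. */
static int example_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;		/* event is not for this PMU */
	return 0;
}
static int  example_add(struct perf_event *event, int flags)	{ return 0; }
static void example_del(struct perf_event *event, int flags)	{ }
static void example_start(struct perf_event *event, int flags)	{ }
static void example_stop(struct perf_event *event, int flags)	{ }
static void example_read(struct perf_event *event)		{ }

static struct pmu example_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* system-wide counters, no task context */
	.event_init	= example_event_init,
	.add		= example_add,
	.del		= example_del,
	.start		= example_start,
	.stop		= example_stop,
	.read		= example_read,
};

static int __init example_pmu_register(void)
{
	/* type = -1 asks the perf core to allocate a dynamic PMU type id */
	return perf_pmu_register(&example_pmu, "example_uncore", -1);
}

The cci_pmu_init() hunk further down fills in exactly this kind of struct pmu in place of the old struct arm_pmu.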
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 603eb1be4f6a..b99729e36860 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -6,7 +6,7 @@ menu "Bus devices"
6 6
7config BRCMSTB_GISB_ARB 7config BRCMSTB_GISB_ARB
8 bool "Broadcom STB GISB bus arbiter" 8 bool "Broadcom STB GISB bus arbiter"
9 depends on ARM 9 depends on ARM || MIPS
10 help 10 help
11 Driver for the Broadcom Set Top Box System-on-a-chip internal bus 11 Driver for the Broadcom Set Top Box System-on-a-chip internal bus
12 arbiter. This driver provides timeout and target abort error handling 12 arbiter. This driver provides timeout and target abort error handling
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 7af78df241f2..860da40b78ef 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -16,17 +16,17 @@
16 16
17#include <linux/arm-cci.h> 17#include <linux/arm-cci.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/interrupt.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/of_address.h> 21#include <linux/of_address.h>
21#include <linux/of_irq.h> 22#include <linux/of_irq.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/perf_event.h>
23#include <linux/platform_device.h> 25#include <linux/platform_device.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include <linux/spinlock.h> 27#include <linux/spinlock.h>
26 28
27#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
28#include <asm/irq_regs.h>
29#include <asm/pmu.h>
30#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
31 31
32#define DRIVER_NAME "CCI-400" 32#define DRIVER_NAME "CCI-400"
@@ -98,6 +98,8 @@ static unsigned long cci_ctrl_phys;
98 98
99#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K) 99#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
100 100
101#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1)
102
101/* 103/*
102 * Instead of an event id to monitor CCI cycles, a dedicated counter is 104 * Instead of an event id to monitor CCI cycles, a dedicated counter is
103 * provided. Use 0xff to represent CCI cycles and hope that no future revisions 105 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
@@ -170,18 +172,29 @@ static char *const pmu_names[] = {
170 [CCI_REV_R1] = "CCI_400_r1", 172 [CCI_REV_R1] = "CCI_400_r1",
171}; 173};
172 174
173struct cci_pmu_drv_data { 175struct cci_pmu_hw_events {
176 struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
177 unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
178 raw_spinlock_t pmu_lock;
179};
180
181struct cci_pmu {
174 void __iomem *base; 182 void __iomem *base;
175 struct arm_pmu *cci_pmu; 183 struct pmu pmu;
176 int nr_irqs; 184 int nr_irqs;
177 int irqs[CCI_PMU_MAX_HW_EVENTS]; 185 int irqs[CCI_PMU_MAX_HW_EVENTS];
178 unsigned long active_irqs; 186 unsigned long active_irqs;
179 struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
180 unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
181 struct pmu_port_event_ranges *port_ranges; 187 struct pmu_port_event_ranges *port_ranges;
182 struct pmu_hw_events hw_events; 188 struct cci_pmu_hw_events hw_events;
189 struct platform_device *plat_device;
190 int num_events;
191 atomic_t active_events;
192 struct mutex reserve_mutex;
193 cpumask_t cpus;
183}; 194};
184static struct cci_pmu_drv_data *pmu; 195static struct cci_pmu *pmu;
196
197#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
185 198
186static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) 199static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
187{ 200{
@@ -252,7 +265,7 @@ static int pmu_validate_hw_event(u8 hw_event)
252 return -ENOENT; 265 return -ENOENT;
253} 266}
254 267
255static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx) 268static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
256{ 269{
257 return CCI_PMU_CYCLE_CNTR_IDX <= idx && 270 return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
258 idx <= CCI_PMU_CNTR_LAST(cci_pmu); 271 idx <= CCI_PMU_CNTR_LAST(cci_pmu);
@@ -293,14 +306,9 @@ static u32 pmu_get_max_counters(void)
293 return n_cnts + 1; 306 return n_cnts + 1;
294} 307}
295 308
296static struct pmu_hw_events *pmu_get_hw_events(void) 309static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
297{
298 return &pmu->hw_events;
299}
300
301static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
302{ 310{
303 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 311 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
304 struct hw_perf_event *hw_event = &event->hw; 312 struct hw_perf_event *hw_event = &event->hw;
305 unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK; 313 unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
306 int idx; 314 int idx;
@@ -336,7 +344,7 @@ static int pmu_map_event(struct perf_event *event)
336 return mapping; 344 return mapping;
337} 345}
338 346
339static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler) 347static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
340{ 348{
341 int i; 349 int i;
342 struct platform_device *pmu_device = cci_pmu->plat_device; 350 struct platform_device *pmu_device = cci_pmu->plat_device;
@@ -371,17 +379,91 @@ static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
371 return 0; 379 return 0;
372} 380}
373 381
382static void pmu_free_irq(struct cci_pmu *cci_pmu)
383{
384 int i;
385
386 for (i = 0; i < pmu->nr_irqs; i++) {
387 if (!test_and_clear_bit(i, &pmu->active_irqs))
388 continue;
389
390 free_irq(pmu->irqs[i], cci_pmu);
391 }
392}
393
394static u32 pmu_read_counter(struct perf_event *event)
395{
396 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
397 struct hw_perf_event *hw_counter = &event->hw;
398 int idx = hw_counter->idx;
399 u32 value;
400
401 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
402 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
403 return 0;
404 }
405 value = pmu_read_register(idx, CCI_PMU_CNTR);
406
407 return value;
408}
409
410static void pmu_write_counter(struct perf_event *event, u32 value)
411{
412 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
413 struct hw_perf_event *hw_counter = &event->hw;
414 int idx = hw_counter->idx;
415
416 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
417 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
418 else
419 pmu_write_register(value, idx, CCI_PMU_CNTR);
420}
421
422static u64 pmu_event_update(struct perf_event *event)
423{
424 struct hw_perf_event *hwc = &event->hw;
425 u64 delta, prev_raw_count, new_raw_count;
426
427 do {
428 prev_raw_count = local64_read(&hwc->prev_count);
429 new_raw_count = pmu_read_counter(event);
430 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
431 new_raw_count) != prev_raw_count);
432
433 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
434
435 local64_add(delta, &event->count);
436
437 return new_raw_count;
438}
439
440static void pmu_read(struct perf_event *event)
441{
442 pmu_event_update(event);
443}
444
445void pmu_event_set_period(struct perf_event *event)
446{
447 struct hw_perf_event *hwc = &event->hw;
448 /*
449 * The CCI PMU counters have a period of 2^32. To account for the
450 * possibility of extreme interrupt latency we program for a period of
451 * half that. Hopefully we can handle the interrupt before another 2^31
452 * events occur and the counter overtakes its previous value.
453 */
454 u64 val = 1ULL << 31;
455 local64_set(&hwc->prev_count, val);
456 pmu_write_counter(event, val);
457}
458
374static irqreturn_t pmu_handle_irq(int irq_num, void *dev) 459static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
375{ 460{
376 unsigned long flags; 461 unsigned long flags;
377 struct arm_pmu *cci_pmu = (struct arm_pmu *)dev; 462 struct cci_pmu *cci_pmu = dev;
378 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 463 struct cci_pmu_hw_events *events = &pmu->hw_events;
379 struct perf_sample_data data;
380 struct pt_regs *regs;
381 int idx, handled = IRQ_NONE; 464 int idx, handled = IRQ_NONE;
382 465
383 raw_spin_lock_irqsave(&events->pmu_lock, flags); 466 raw_spin_lock_irqsave(&events->pmu_lock, flags);
384 regs = get_irq_regs();
385 /* 467 /*
386 * Iterate over counters and update the corresponding perf events. 468 * Iterate over counters and update the corresponding perf events.
387 * This should work regardless of whether we have per-counter overflow 469 * This should work regardless of whether we have per-counter overflow
@@ -403,154 +485,407 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
403 485
404 pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); 486 pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
405 487
488 pmu_event_update(event);
489 pmu_event_set_period(event);
406 handled = IRQ_HANDLED; 490 handled = IRQ_HANDLED;
407
408 armpmu_event_update(event);
409 perf_sample_data_init(&data, 0, hw_counter->last_period);
410 if (!armpmu_event_set_period(event))
411 continue;
412
413 if (perf_event_overflow(event, &data, regs))
414 cci_pmu->disable(event);
415 } 491 }
416 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 492 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
417 493
418 return IRQ_RETVAL(handled); 494 return IRQ_RETVAL(handled);
419} 495}
420 496
421static void pmu_free_irq(struct arm_pmu *cci_pmu) 497static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
422{ 498{
423 int i; 499 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
500 if (ret) {
501 pmu_free_irq(cci_pmu);
502 return ret;
503 }
504 return 0;
505}
424 506
425 for (i = 0; i < pmu->nr_irqs; i++) { 507static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
426 if (!test_and_clear_bit(i, &pmu->active_irqs)) 508{
427 continue; 509 pmu_free_irq(cci_pmu);
510}
428 511
429 free_irq(pmu->irqs[i], cci_pmu); 512static void hw_perf_event_destroy(struct perf_event *event)
513{
514 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
515 atomic_t *active_events = &cci_pmu->active_events;
516 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
517
518 if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
519 cci_pmu_put_hw(cci_pmu);
520 mutex_unlock(reserve_mutex);
430 } 521 }
431} 522}
432 523
433static void pmu_enable_event(struct perf_event *event) 524static void cci_pmu_enable(struct pmu *pmu)
434{ 525{
526 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
527 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
528 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
435 unsigned long flags; 529 unsigned long flags;
436 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 530 u32 val;
437 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 531
438 struct hw_perf_event *hw_counter = &event->hw; 532 if (!enabled)
439 int idx = hw_counter->idx; 533 return;
534
535 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
536
537 /* Enable all the PMU counters. */
538 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
539 writel(val, cci_ctrl_base + CCI_PMCR);
540 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
541
542}
543
544static void cci_pmu_disable(struct pmu *pmu)
545{
546 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
547 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
548 unsigned long flags;
549 u32 val;
550
551 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
552
553 /* Disable all the PMU counters. */
554 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
555 writel(val, cci_ctrl_base + CCI_PMCR);
556 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
557}
558
559static void cci_pmu_start(struct perf_event *event, int pmu_flags)
560{
561 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
562 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
563 struct hw_perf_event *hwc = &event->hw;
564 int idx = hwc->idx;
565 unsigned long flags;
566
567 /*
568 * To handle interrupt latency, we always reprogram the period
569 * regardless of PERF_EF_RELOAD.
570 */
571 if (pmu_flags & PERF_EF_RELOAD)
572 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
573
574 hwc->state = 0;
440 575
441 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { 576 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
442 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 577 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
443 return; 578 return;
444 } 579 }
445 580
446 raw_spin_lock_irqsave(&events->pmu_lock, flags); 581 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
447 582
448 /* Configure the event to count, unless you are counting cycles */ 583 /* Configure the event to count, unless you are counting cycles */
449 if (idx != CCI_PMU_CYCLE_CNTR_IDX) 584 if (idx != CCI_PMU_CYCLE_CNTR_IDX)
450 pmu_set_event(idx, hw_counter->config_base); 585 pmu_set_event(idx, hwc->config_base);
451 586
587 pmu_event_set_period(event);
452 pmu_enable_counter(idx); 588 pmu_enable_counter(idx);
453 589
454 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 590 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
455} 591}
456 592
457static void pmu_disable_event(struct perf_event *event) 593static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
458{ 594{
459 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 595 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
460 struct hw_perf_event *hw_counter = &event->hw; 596 struct hw_perf_event *hwc = &event->hw;
461 int idx = hw_counter->idx; 597 int idx = hwc->idx;
598
599 if (hwc->state & PERF_HES_STOPPED)
600 return;
462 601
463 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { 602 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
464 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 603 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
465 return; 604 return;
466 } 605 }
467 606
607 /*
608 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
609 * cci_pmu_start()
610 */
468 pmu_disable_counter(idx); 611 pmu_disable_counter(idx);
612 pmu_event_update(event);
613 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
469} 614}
470 615
471static void pmu_start(struct arm_pmu *cci_pmu) 616static int cci_pmu_add(struct perf_event *event, int flags)
472{ 617{
473 u32 val; 618 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
474 unsigned long flags; 619 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
475 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 620 struct hw_perf_event *hwc = &event->hw;
621 int idx;
622 int err = 0;
476 623
477 raw_spin_lock_irqsave(&events->pmu_lock, flags); 624 perf_pmu_disable(event->pmu);
478 625
479 /* Enable all the PMU counters. */ 626 /* If we don't have a space for the counter then finish early. */
480 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; 627 idx = pmu_get_event_idx(hw_events, event);
481 writel(val, cci_ctrl_base + CCI_PMCR); 628 if (idx < 0) {
629 err = idx;
630 goto out;
631 }
482 632
483 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 633 event->hw.idx = idx;
634 hw_events->events[idx] = event;
635
636 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
637 if (flags & PERF_EF_START)
638 cci_pmu_start(event, PERF_EF_RELOAD);
639
640 /* Propagate our changes to the userspace mapping. */
641 perf_event_update_userpage(event);
642
643out:
644 perf_pmu_enable(event->pmu);
645 return err;
484} 646}
485 647
486static void pmu_stop(struct arm_pmu *cci_pmu) 648static void cci_pmu_del(struct perf_event *event, int flags)
487{ 649{
488 u32 val; 650 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
489 unsigned long flags; 651 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
490 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 652 struct hw_perf_event *hwc = &event->hw;
653 int idx = hwc->idx;
491 654
492 raw_spin_lock_irqsave(&events->pmu_lock, flags); 655 cci_pmu_stop(event, PERF_EF_UPDATE);
656 hw_events->events[idx] = NULL;
657 clear_bit(idx, hw_events->used_mask);
493 658
494 /* Disable all the PMU counters. */ 659 perf_event_update_userpage(event);
495 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; 660}
496 writel(val, cci_ctrl_base + CCI_PMCR);
497 661
498 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 662static int
663validate_event(struct cci_pmu_hw_events *hw_events,
664 struct perf_event *event)
665{
666 if (is_software_event(event))
667 return 1;
668
669 if (event->state < PERF_EVENT_STATE_OFF)
670 return 1;
671
672 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
673 return 1;
674
675 return pmu_get_event_idx(hw_events, event) >= 0;
499} 676}
500 677
501static u32 pmu_read_counter(struct perf_event *event) 678static int
679validate_group(struct perf_event *event)
502{ 680{
503 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 681 struct perf_event *sibling, *leader = event->group_leader;
504 struct hw_perf_event *hw_counter = &event->hw; 682 struct cci_pmu_hw_events fake_pmu = {
505 int idx = hw_counter->idx; 683 /*
506 u32 value; 684 * Initialise the fake PMU. We only need to populate the
685 * used_mask for the purposes of validation.
686 */
687 .used_mask = CPU_BITS_NONE,
688 };
507 689
508 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { 690 if (!validate_event(&fake_pmu, leader))
509 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 691 return -EINVAL;
510 return 0; 692
693 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
694 if (!validate_event(&fake_pmu, sibling))
695 return -EINVAL;
511 } 696 }
512 value = pmu_read_register(idx, CCI_PMU_CNTR);
513 697
514 return value; 698 if (!validate_event(&fake_pmu, event))
699 return -EINVAL;
700
701 return 0;
515} 702}
516 703
517static void pmu_write_counter(struct perf_event *event, u32 value) 704static int
705__hw_perf_event_init(struct perf_event *event)
518{ 706{
519 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 707 struct hw_perf_event *hwc = &event->hw;
520 struct hw_perf_event *hw_counter = &event->hw; 708 int mapping;
521 int idx = hw_counter->idx;
522 709
523 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) 710 mapping = pmu_map_event(event);
524 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 711
525 else 712 if (mapping < 0) {
526 pmu_write_register(value, idx, CCI_PMU_CNTR); 713 pr_debug("event %x:%llx not supported\n", event->attr.type,
714 event->attr.config);
715 return mapping;
716 }
717
718 /*
719 * We don't assign an index until we actually place the event onto
720 * hardware. Use -1 to signify that we haven't decided where to put it
721 * yet.
722 */
723 hwc->idx = -1;
724 hwc->config_base = 0;
725 hwc->config = 0;
726 hwc->event_base = 0;
727
728 /*
729 * Store the event encoding into the config_base field.
730 */
731 hwc->config_base |= (unsigned long)mapping;
732
733 /*
734 * Limit the sample_period to half of the counter width. That way, the
735 * new counter value is far less likely to overtake the previous one
736 * unless you have some serious IRQ latency issues.
737 */
738 hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
739 hwc->last_period = hwc->sample_period;
740 local64_set(&hwc->period_left, hwc->sample_period);
741
742 if (event->group_leader != event) {
743 if (validate_group(event) != 0)
744 return -EINVAL;
745 }
746
747 return 0;
748}
749
750static int cci_pmu_event_init(struct perf_event *event)
751{
752 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
753 atomic_t *active_events = &cci_pmu->active_events;
754 int err = 0;
755 int cpu;
756
757 if (event->attr.type != event->pmu->type)
758 return -ENOENT;
759
760 /* Shared by all CPUs, no meaningful state to sample */
761 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
762 return -EOPNOTSUPP;
763
764 /* We have no filtering of any kind */
765 if (event->attr.exclude_user ||
766 event->attr.exclude_kernel ||
767 event->attr.exclude_hv ||
768 event->attr.exclude_idle ||
769 event->attr.exclude_host ||
770 event->attr.exclude_guest)
771 return -EINVAL;
772
773 /*
774 * Following the example set by other "uncore" PMUs, we accept any CPU
775 * and rewrite its affinity dynamically rather than having perf core
776 * handle cpu == -1 and pid == -1 for this case.
777 *
778 * The perf core will pin online CPUs for the duration of this call and
779 * the event being installed into its context, so the PMU's CPU can't
780 * change under our feet.
781 */
782 cpu = cpumask_first(&cci_pmu->cpus);
783 if (event->cpu < 0 || cpu < 0)
784 return -EINVAL;
785 event->cpu = cpu;
786
787 event->destroy = hw_perf_event_destroy;
788 if (!atomic_inc_not_zero(active_events)) {
789 mutex_lock(&cci_pmu->reserve_mutex);
790 if (atomic_read(active_events) == 0)
791 err = cci_pmu_get_hw(cci_pmu);
792 if (!err)
793 atomic_inc(active_events);
794 mutex_unlock(&cci_pmu->reserve_mutex);
795 }
796 if (err)
797 return err;
798
799 err = __hw_perf_event_init(event);
800 if (err)
801 hw_perf_event_destroy(event);
802
803 return err;
527} 804}
528 805
529static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev) 806static ssize_t pmu_attr_cpumask_show(struct device *dev,
807 struct device_attribute *attr, char *buf)
530{ 808{
531 *cci_pmu = (struct arm_pmu){ 809 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &pmu->cpus);
532 .name = pmu_names[probe_cci_revision()], 810
533 .max_period = (1LLU << 32) - 1, 811 buf[n++] = '\n';
534 .get_hw_events = pmu_get_hw_events, 812 buf[n] = '\0';
535 .get_event_idx = pmu_get_event_idx, 813 return n;
536 .map_event = pmu_map_event, 814}
537 .request_irq = pmu_request_irq, 815
538 .handle_irq = pmu_handle_irq, 816static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);
539 .free_irq = pmu_free_irq, 817
540 .enable = pmu_enable_event, 818static struct attribute *pmu_attrs[] = {
541 .disable = pmu_disable_event, 819 &dev_attr_cpumask.attr,
542 .start = pmu_start, 820 NULL,
543 .stop = pmu_stop, 821};
544 .read_counter = pmu_read_counter, 822
545 .write_counter = pmu_write_counter, 823static struct attribute_group pmu_attr_group = {
824 .attrs = pmu_attrs,
825};
826
827static const struct attribute_group *pmu_attr_groups[] = {
828 &pmu_attr_group,
829 NULL
830};
831
832static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
833{
834 char *name = pmu_names[probe_cci_revision()];
835 cci_pmu->pmu = (struct pmu) {
836 .name = pmu_names[probe_cci_revision()],
837 .task_ctx_nr = perf_invalid_context,
838 .pmu_enable = cci_pmu_enable,
839 .pmu_disable = cci_pmu_disable,
840 .event_init = cci_pmu_event_init,
841 .add = cci_pmu_add,
842 .del = cci_pmu_del,
843 .start = cci_pmu_start,
844 .stop = cci_pmu_stop,
845 .read = pmu_read,
846 .attr_groups = pmu_attr_groups,
546 }; 847 };
547 848
548 cci_pmu->plat_device = pdev; 849 cci_pmu->plat_device = pdev;
549 cci_pmu->num_events = pmu_get_max_counters(); 850 cci_pmu->num_events = pmu_get_max_counters();
550 851
551 return armpmu_register(cci_pmu, -1); 852 return perf_pmu_register(&cci_pmu->pmu, name, -1);
552} 853}
553 854
855static int cci_pmu_cpu_notifier(struct notifier_block *self,
856 unsigned long action, void *hcpu)
857{
858 unsigned int cpu = (long)hcpu;
859 unsigned int target;
860
861 switch (action & ~CPU_TASKS_FROZEN) {
862 case CPU_DOWN_PREPARE:
863 if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
864 break;
865 target = cpumask_any_but(cpu_online_mask, cpu);
866 if (target >= nr_cpu_ids) // UP, last CPU
867 break;
868 /*
869 * TODO: migrate context once core races on event->ctx have
870 * been fixed.
871 */
872 cpumask_set_cpu(target, &pmu->cpus);
873 default:
874 break;
875 }
876
877 return NOTIFY_OK;
878}
879
880static struct notifier_block cci_pmu_cpu_nb = {
881 .notifier_call = cci_pmu_cpu_notifier,
882 /*
883 * to migrate uncore events, our notifier should be executed
884 * before perf core's notifier.
885 */
886 .priority = CPU_PRI_PERF + 1,
887};
888
554static const struct of_device_id arm_cci_pmu_matches[] = { 889static const struct of_device_id arm_cci_pmu_matches[] = {
555 { 890 {
556 .compatible = "arm,cci-400-pmu", 891 .compatible = "arm,cci-400-pmu",
@@ -604,15 +939,16 @@ static int cci_pmu_probe(struct platform_device *pdev)
604 return -EINVAL; 939 return -EINVAL;
605 } 940 }
606 941
607 pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
608 if (!pmu->cci_pmu)
609 return -ENOMEM;
610
611 pmu->hw_events.events = pmu->events;
612 pmu->hw_events.used_mask = pmu->used_mask;
613 raw_spin_lock_init(&pmu->hw_events.pmu_lock); 942 raw_spin_lock_init(&pmu->hw_events.pmu_lock);
943 mutex_init(&pmu->reserve_mutex);
944 atomic_set(&pmu->active_events, 0);
945 cpumask_set_cpu(smp_processor_id(), &pmu->cpus);
946
947 ret = register_cpu_notifier(&cci_pmu_cpu_nb);
948 if (ret)
949 return ret;
614 950
615 ret = cci_pmu_init(pmu->cci_pmu, pdev); 951 ret = cci_pmu_init(pmu, pdev);
616 if (ret) 952 if (ret)
617 return ret; 953 return ret;
618 954
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index e7ccd21a45c9..46de8dc39eb4 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -25,26 +25,72 @@
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/pm.h> 26#include <linux/pm.h>
27 27
28#ifdef CONFIG_ARM
28#include <asm/bug.h> 29#include <asm/bug.h>
29#include <asm/signal.h> 30#include <asm/signal.h>
31#endif
30 32
31#define ARB_TIMER 0x008
32#define ARB_ERR_CAP_CLR 0x7e4
33#define ARB_ERR_CAP_CLEAR (1 << 0) 33#define ARB_ERR_CAP_CLEAR (1 << 0)
34#define ARB_ERR_CAP_HI_ADDR 0x7e8
35#define ARB_ERR_CAP_ADDR 0x7ec
36#define ARB_ERR_CAP_DATA 0x7f0
37#define ARB_ERR_CAP_STATUS 0x7f4
38#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) 34#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
39#define ARB_ERR_CAP_STATUS_TEA (1 << 11) 35#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
40#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) 36#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
41#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c 37#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
42#define ARB_ERR_CAP_STATUS_WRITE (1 << 1) 38#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
43#define ARB_ERR_CAP_STATUS_VALID (1 << 0) 39#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
44#define ARB_ERR_CAP_MASTER 0x7f8 40
41enum {
42 ARB_TIMER,
43 ARB_ERR_CAP_CLR,
44 ARB_ERR_CAP_HI_ADDR,
45 ARB_ERR_CAP_ADDR,
46 ARB_ERR_CAP_DATA,
47 ARB_ERR_CAP_STATUS,
48 ARB_ERR_CAP_MASTER,
49};
50
51static const int gisb_offsets_bcm7038[] = {
52 [ARB_TIMER] = 0x00c,
53 [ARB_ERR_CAP_CLR] = 0x0c4,
54 [ARB_ERR_CAP_HI_ADDR] = -1,
55 [ARB_ERR_CAP_ADDR] = 0x0c8,
56 [ARB_ERR_CAP_DATA] = 0x0cc,
57 [ARB_ERR_CAP_STATUS] = 0x0d0,
58 [ARB_ERR_CAP_MASTER] = -1,
59};
60
61static const int gisb_offsets_bcm7400[] = {
62 [ARB_TIMER] = 0x00c,
63 [ARB_ERR_CAP_CLR] = 0x0c8,
64 [ARB_ERR_CAP_HI_ADDR] = -1,
65 [ARB_ERR_CAP_ADDR] = 0x0cc,
66 [ARB_ERR_CAP_DATA] = 0x0d0,
67 [ARB_ERR_CAP_STATUS] = 0x0d4,
68 [ARB_ERR_CAP_MASTER] = 0x0d8,
69};
70
71static const int gisb_offsets_bcm7435[] = {
72 [ARB_TIMER] = 0x00c,
73 [ARB_ERR_CAP_CLR] = 0x168,
74 [ARB_ERR_CAP_HI_ADDR] = -1,
75 [ARB_ERR_CAP_ADDR] = 0x16c,
76 [ARB_ERR_CAP_DATA] = 0x170,
77 [ARB_ERR_CAP_STATUS] = 0x174,
78 [ARB_ERR_CAP_MASTER] = 0x178,
79};
80
81static const int gisb_offsets_bcm7445[] = {
82 [ARB_TIMER] = 0x008,
83 [ARB_ERR_CAP_CLR] = 0x7e4,
84 [ARB_ERR_CAP_HI_ADDR] = 0x7e8,
85 [ARB_ERR_CAP_ADDR] = 0x7ec,
86 [ARB_ERR_CAP_DATA] = 0x7f0,
87 [ARB_ERR_CAP_STATUS] = 0x7f4,
88 [ARB_ERR_CAP_MASTER] = 0x7f8,
89};
45 90
46struct brcmstb_gisb_arb_device { 91struct brcmstb_gisb_arb_device {
47 void __iomem *base; 92 void __iomem *base;
93 const int *gisb_offsets;
48 struct mutex lock; 94 struct mutex lock;
49 struct list_head next; 95 struct list_head next;
50 u32 valid_mask; 96 u32 valid_mask;
@@ -54,6 +100,26 @@ struct brcmstb_gisb_arb_device {
54 100
55static LIST_HEAD(brcmstb_gisb_arb_device_list); 101static LIST_HEAD(brcmstb_gisb_arb_device_list);
56 102
103static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
104{
105 int offset = gdev->gisb_offsets[reg];
106
107 /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
108 if (offset == -1)
109 return 1;
110
111 return ioread32(gdev->base + offset);
112}
113
114static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
115{
116 int offset = gdev->gisb_offsets[reg];
117
118 if (offset == -1)
119 return;
120 iowrite32(val, gdev->base + offset);
121}
122
57static ssize_t gisb_arb_get_timeout(struct device *dev, 123static ssize_t gisb_arb_get_timeout(struct device *dev,
58 struct device_attribute *attr, 124 struct device_attribute *attr,
59 char *buf) 125 char *buf)
@@ -63,7 +129,7 @@ static ssize_t gisb_arb_get_timeout(struct device *dev,
63 u32 timeout; 129 u32 timeout;
64 130
65 mutex_lock(&gdev->lock); 131 mutex_lock(&gdev->lock);
66 timeout = ioread32(gdev->base + ARB_TIMER); 132 timeout = gisb_read(gdev, ARB_TIMER);
67 mutex_unlock(&gdev->lock); 133 mutex_unlock(&gdev->lock);
68 134
69 return sprintf(buf, "%d", timeout); 135 return sprintf(buf, "%d", timeout);
@@ -85,7 +151,7 @@ static ssize_t gisb_arb_set_timeout(struct device *dev,
85 return -EINVAL; 151 return -EINVAL;
86 152
87 mutex_lock(&gdev->lock); 153 mutex_lock(&gdev->lock);
88 iowrite32(val, gdev->base + ARB_TIMER); 154 gisb_write(gdev, val, ARB_TIMER);
89 mutex_unlock(&gdev->lock); 155 mutex_unlock(&gdev->lock);
90 156
91 return count; 157 return count;
@@ -112,18 +178,18 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
112 const char *m_name; 178 const char *m_name;
113 char m_fmt[11]; 179 char m_fmt[11];
114 180
115 cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS); 181 cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
116 182
117 /* Invalid captured address, bail out */ 183 /* Invalid captured address, bail out */
118 if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) 184 if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
119 return 1; 185 return 1;
120 186
121 /* Read the address and master */ 187 /* Read the address and master */
122 arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff; 188 arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
123#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) 189#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
124 arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32; 190 arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
125#endif 191#endif
126 master = ioread32(gdev->base + ARB_ERR_CAP_MASTER); 192 master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
127 193
128 m_name = brcmstb_gisb_master_to_str(gdev, master); 194 m_name = brcmstb_gisb_master_to_str(gdev, master);
129 if (!m_name) { 195 if (!m_name) {
@@ -138,11 +204,12 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
138 m_name); 204 m_name);
139 205
140 /* clear the GISB error */ 206 /* clear the GISB error */
141 iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR); 207 gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
142 208
143 return 0; 209 return 0;
144} 210}
145 211
212#ifdef CONFIG_ARM
146static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr, 213static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
147 struct pt_regs *regs) 214 struct pt_regs *regs)
148{ 215{
@@ -161,6 +228,7 @@ static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
161 228
162 return ret; 229 return ret;
163} 230}
231#endif
164 232
165static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id) 233static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
166{ 234{
@@ -188,10 +256,20 @@ static struct attribute_group gisb_arb_sysfs_attr_group = {
188 .attrs = gisb_arb_sysfs_attrs, 256 .attrs = gisb_arb_sysfs_attrs,
189}; 257};
190 258
191static int brcmstb_gisb_arb_probe(struct platform_device *pdev) 259static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
260 { .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
261 { .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
262 { .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
263 { .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
264 { .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
265 { },
266};
267
268static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
192{ 269{
193 struct device_node *dn = pdev->dev.of_node; 270 struct device_node *dn = pdev->dev.of_node;
194 struct brcmstb_gisb_arb_device *gdev; 271 struct brcmstb_gisb_arb_device *gdev;
272 const struct of_device_id *of_id;
195 struct resource *r; 273 struct resource *r;
196 int err, timeout_irq, tea_irq; 274 int err, timeout_irq, tea_irq;
197 unsigned int num_masters, j = 0; 275 unsigned int num_masters, j = 0;
@@ -212,6 +290,13 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
212 if (IS_ERR(gdev->base)) 290 if (IS_ERR(gdev->base))
213 return PTR_ERR(gdev->base); 291 return PTR_ERR(gdev->base);
214 292
293 of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
294 if (!of_id) {
295 pr_err("failed to look up compatible string\n");
296 return -EINVAL;
297 }
298 gdev->gisb_offsets = of_id->data;
299
215 err = devm_request_irq(&pdev->dev, timeout_irq, 300 err = devm_request_irq(&pdev->dev, timeout_irq,
216 brcmstb_gisb_timeout_handler, 0, pdev->name, 301 brcmstb_gisb_timeout_handler, 0, pdev->name,
217 gdev); 302 gdev);
@@ -257,8 +342,10 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
257 342
258 list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list); 343 list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
259 344
345#ifdef CONFIG_ARM
260 hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0, 346 hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
261 "imprecise external abort"); 347 "imprecise external abort");
348#endif
262 349
263 dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n", 350 dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
264 gdev->base, timeout_irq, tea_irq); 351 gdev->base, timeout_irq, tea_irq);
@@ -272,7 +359,7 @@ static int brcmstb_gisb_arb_suspend(struct device *dev)
272 struct platform_device *pdev = to_platform_device(dev); 359 struct platform_device *pdev = to_platform_device(dev);
273 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); 360 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
274 361
275 gdev->saved_timeout = ioread32(gdev->base + ARB_TIMER); 362 gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
276 363
277 return 0; 364 return 0;
278} 365}
@@ -285,7 +372,7 @@ static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
285 struct platform_device *pdev = to_platform_device(dev); 372 struct platform_device *pdev = to_platform_device(dev);
286 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); 373 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
287 374
288 iowrite32(gdev->saved_timeout, gdev->base + ARB_TIMER); 375 gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
289 376
290 return 0; 377 return 0;
291} 378}
@@ -299,13 +386,7 @@ static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
299 .resume_noirq = brcmstb_gisb_arb_resume_noirq, 386 .resume_noirq = brcmstb_gisb_arb_resume_noirq,
300}; 387};
301 388
302static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
303 { .compatible = "brcm,gisb-arb" },
304 { },
305};
306
307static struct platform_driver brcmstb_gisb_arb_driver = { 389static struct platform_driver brcmstb_gisb_arb_driver = {
308 .probe = brcmstb_gisb_arb_probe,
309 .driver = { 390 .driver = {
310 .name = "brcm-gisb-arb", 391 .name = "brcm-gisb-arb",
311 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
@@ -316,7 +397,8 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
316 397
317static int __init brcm_gisb_driver_init(void) 398static int __init brcm_gisb_driver_init(void)
318{ 399{
319 return platform_driver_register(&brcmstb_gisb_arb_driver); 400 return platform_driver_probe(&brcmstb_gisb_arb_driver,
401 brcmstb_gisb_arb_probe);
320} 402}
321 403
322module_init(brcm_gisb_driver_init); 404module_init(brcm_gisb_driver_init);
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 531ae591783b..17d86595951c 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -222,10 +222,14 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
222 } 222 }
223 223
224 /* Error found so break the for loop */ 224 /* Error found so break the for loop */
225 break; 225 return IRQ_HANDLED;
226 } 226 }
227 } 227 }
228 return IRQ_HANDLED; 228
229 dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
230 inttype ? "debug" : "application");
231
232 return IRQ_NONE;
229} 233}
230 234
231static const struct of_device_id l3_noc_match[] = { 235static const struct of_device_id l3_noc_match[] = {
@@ -296,11 +300,66 @@ static int omap_l3_probe(struct platform_device *pdev)
296 return ret; 300 return ret;
297} 301}
298 302
303#ifdef CONFIG_PM
304
305/**
306 * l3_resume_noirq() - resume function for l3_noc
307 * @dev: pointer to l3_noc device structure
308 *
309 * We only have the resume handler, since we
310 * have already maintained the delta register
311 * configuration as part of configuring the system
312 */
313static int l3_resume_noirq(struct device *dev)
314{
315 struct omap_l3 *l3 = dev_get_drvdata(dev);
316 int i;
317 struct l3_flagmux_data *flag_mux;
318 void __iomem *base, *mask_regx = NULL;
319 u32 mask_val;
320
321 for (i = 0; i < l3->num_modules; i++) {
322 base = l3->l3_base[i];
323 flag_mux = l3->l3_flagmux[i];
324 if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
325 continue;
326
327 mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
328 (L3_APPLICATION_ERROR << 3);
329 mask_val = readl_relaxed(mask_regx);
330 mask_val &= ~(flag_mux->mask_app_bits);
331
332 writel_relaxed(mask_val, mask_regx);
333 mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
334 (L3_DEBUG_ERROR << 3);
335 mask_val = readl_relaxed(mask_regx);
336 mask_val &= ~(flag_mux->mask_dbg_bits);
337
338 writel_relaxed(mask_val, mask_regx);
339 }
340
341 /* Dummy read to force OCP barrier */
342 if (mask_regx)
343 (void)readl(mask_regx);
344
345 return 0;
346}
347
348static const struct dev_pm_ops l3_dev_pm_ops = {
349 .resume_noirq = l3_resume_noirq,
350};
351
352#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
353#else
354#define L3_DEV_PM_OPS NULL
355#endif
356
299static struct platform_driver omap_l3_driver = { 357static struct platform_driver omap_l3_driver = {
300 .probe = omap_l3_probe, 358 .probe = omap_l3_probe,
301 .driver = { 359 .driver = {
302 .name = "omap_l3_noc", 360 .name = "omap_l3_noc",
303 .owner = THIS_MODULE, 361 .owner = THIS_MODULE,
362 .pm = L3_DEV_PM_OPS,
304 .of_match_table = of_match_ptr(l3_noc_match), 363 .of_match_table = of_match_ptr(l3_noc_match),
305 }, 364 },
306}; 365};