author:    Ben Gardiner <bengardiner@nanometrics.ca>	2011-05-30 14:56:14 -0400
committer: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2011-06-03 11:12:31 -0400
commit:    4f1ab9b01d34eac9fc958f7150d3bf266dcc1685 (patch)
tree:      77ff5efa6449743984e77f491902fa53f7716969
parent:    837072377034d0a0b18b851d1ab95676b245cc0a (diff)
UBIFS: assert no fixup when writing a node
The current free space fixup can result in some writing to the UBI volume
when the space_fixup flag is set. To catch instances where UBIFS is writing
to the NAND while the space_fixup flag is set, add an assert to
ubifs_write_node().

Artem: tweaked the patch, added a similar assertion to the write-buffer
write path.

Signed-off-by: Ben Gardiner <bengardiner@nanometrics.ca>
Reviewed-by: Matthew L. Creech <mlcreech@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
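The change itself amounts to a one-line assertion in the node write path. A
minimal sketch, assuming the flag is carried in struct ubifs_info as
c->space_fixup:

	/* In ubifs_write_node() (and the write-buffer path): direct writes
	 * must not happen while the free-space fixup is still pending. */
	ubifs_assert(!c->space_fixup);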
/*
 * Denver15 Uncore PMU support
 *
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/version.h>

/*
 * The perf events include structure was refactored starting with 4.4;
 * this driver is only valid on kernel versions 4.4 and later.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/tegra-mce.h>
#include <linux/platform/tegra/tegra18_cpu_map.h>

#include "dmce_perfmon.h"

#define DENVERPMU_MAX_HWEVENTS		8

/*
 * D15 perf uncore supports two counters
 */
#define DENVER_MAX_UNCORE_CNTS		2

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
#define DENVER_EVTYPE_MASK	0x00000d00
#define DENVER_EVTYPE_EVENT_ID	0x0ff

static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

enum denver_uncore_perf_types {
	DENVER_PMU_L2D_CACHE = 0x16,
	DENVER_PMU_L2D_CACHE_REFILL,
	DENVER_PMU_L2D_CACHE_WB,
	DENVER_PMU_L2D_CACHE_LD = 0x50,
	DENVER_PMU_L2D_CACHE_ST,
	DENVER_PMU_L2D_CACHE_REFILL_LD,
	DENVER_PMU_L2D_CACHE_REFILL_ST,
	DENVER_PMU_L2D_CACHE_WB_VIC_TIM = 0x56
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1
#define ARMV8_MAX_COUNTERS	32
#define ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
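/* Example: perf index ARMV8_IDX_COUNTER0 (1) maps to hardware counter 0 */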

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive dbg */
#define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK	0x1f
#define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1	(1 << 31)
#define ARMV8_EXCLUDE_EL0	(1 << 30)
#define ARMV8_INCLUDE_EL2	(1 << 27)

static struct dmce_perfmon_cnt_info denver_uncore_event[DENVER_MAX_UNCORE_CNTS];

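/*
 * Issue one uncore perfmon register access through the MCE firmware:
 * pack the group/unit/reg/counter selectors into an ARI request and
 * hand it to tegra_mce_{read,write}_uncore_perfmon(). Returns the
 * DMCE_PERFMON_STATUS_* code, or (u32)-1 when run on a non-Denver CPU.
 */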
static u32 mce_perfmon_rw(uint8_t command, uint8_t group, uint8_t unit,
		   uint8_t reg, uint8_t counter, u32 *data)
{
	union dmce_perfmon_ari_request_hi_t r;
	u32 status = -1;
	u32 cpu = smp_processor_id();

	if (!tegra18_is_cpu_denver(cpu))
		return status;

	r.bits.command = command;
	r.bits.group = group;
	r.bits.unit = unit;
	r.bits.reg = reg;
	r.bits.counter = counter;

	if (command == DMCE_PERFMON_COMMAND_WRITE)
		status = tegra_mce_write_uncore_perfmon(r.flat, *data);
	else if (command == DMCE_PERFMON_COMMAND_READ)
		status = tegra_mce_read_uncore_perfmon(r.flat, data);
	else
		pr_err("perfmon command not recognized");

	if (status != DMCE_PERFMON_STATUS_SUCCESS) {
		pr_err("perfmon status error: %u", status);
		pr_info("ARI CMD:%x REG:%x CTR:%x Data:%x\n", command, reg,
			counter, *data);
	}

	return status;
}

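/*
 * Find the allocation slot backing perf counter index @idx. Copies the
 * slot to @info and returns 0, or -1 if no valid slot matches.
 */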
static inline int get_ctr_info(u32 idx, struct dmce_perfmon_cnt_info *info)
{
	int i;

	for (i = 0; i < DENVER_MAX_UNCORE_CNTS; i++) {
		if (denver_uncore_event[i].index == idx &&
			denver_uncore_event[i].valid == 1) {
			*info = denver_uncore_event[i];
			return 0;
		}
	}

	return -1;
}

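/*
 * Reserve a free slot in denver_uncore_event[] for perf counter index
 * @idx, recording the event and group selectors. A no-op if the index
 * already owns a slot; fails when both uncore counters are taken.
 */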
static inline int alloc_denver_ctr(u32 idx, u32 group, u32 event)
{
	int i;
	struct dmce_perfmon_cnt_info info;

	if (get_ctr_info(idx, &info) < 0) {
		for (i = 0; i < DENVER_MAX_UNCORE_CNTS; i++) {
			if (denver_uncore_event[i].valid == 0) {
				denver_uncore_event[i].counter = event;
				denver_uncore_event[i].group = group;
				denver_uncore_event[i].unit = 0;
				denver_uncore_event[i].index = idx;
				denver_uncore_event[i].idx = i;
				denver_uncore_event[i].valid = 1;
				break;
			}
		}

		if (i == DENVER_MAX_UNCORE_CNTS) {
			pr_err("Failed to allocate D15 uncore ctr\n");
			return -1;
		}
	}

	return 0;
}

static inline int clear_denver_ctr(u32 idx)
{
	int i;

	for (i = 0; i < DENVER_MAX_UNCORE_CNTS; i++) {
		if (denver_uncore_event[i].index == idx) {
			denver_uncore_event[i].valid = 0;
			break;
		}
	}

	return 0;
}

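/*
 * Map an event ID to its uncore perfmon group. Only the L2D cache
 * events listed above are supported, and all of them live in group 0;
 * anything else (or a non-Denver CPU) yields -1.
 */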
static inline int get_uncore_group(u32 event)
{
	u32 cpu = smp_processor_id();

	if (!tegra18_is_cpu_denver(cpu))
		return -1;

	switch (event) {
	case DENVER_PMU_L2D_CACHE:
	case DENVER_PMU_L2D_CACHE_REFILL:
	case DENVER_PMU_L2D_CACHE_WB:
	case DENVER_PMU_L2D_CACHE_LD:
	case DENVER_PMU_L2D_CACHE_ST:
	case DENVER_PMU_L2D_CACHE_REFILL_LD:
	case DENVER_PMU_L2D_CACHE_REFILL_ST:
	case DENVER_PMU_L2D_CACHE_WB_VIC_TIM:
		return 0;
	default:
		return -1;
	}
}

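/*
 * A counter index is valid when it owns an allocation slot and the
 * slot's event maps to a known uncore group; on success the slot is
 * copied out through @info.
 */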
static inline int denverpmu_counter_valid(u32 idx,
		struct dmce_perfmon_cnt_info *info)
{
	int ret;

	if (get_ctr_info(idx, info) < 0)
		return 0;

	ret = get_uncore_group(info->counter);

	return ret >= 0 ? 1 : 0;
}

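/*
 * Raw event mapping: reject configs whose event ID is not a supported
 * uncore event; otherwise return the config masked to the raw event
 * bits the hardware understands.
 */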
static int
denverpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	if (get_uncore_group(config & DENVER_EVTYPE_EVENT_ID) < 0)
		return -ENOENT;
	else
		return (int)(config & raw_event_mask);
}

static inline int denver15pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int denver15pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;
	struct dmce_perfmon_cnt_info info;

	if (!denverpmu_counter_valid(idx, &info)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV8_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

static inline u32 denver15pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct dmce_perfmon_cnt_info info;
	u32 value = 0;

	if (denverpmu_counter_valid(idx, &info))
		mce_perfmon_rw(DMCE_PERFMON_COMMAND_READ, info.group,
				info.unit, NV_PMEVCNTR, info.idx, &value);
	else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}

static inline void denver15pmu_write_counter(struct perf_event *event,
		u32 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct dmce_perfmon_cnt_info info;

	if (denverpmu_counter_valid(idx, &info))
		mce_perfmon_rw(DMCE_PERFMON_COMMAND_WRITE, info.group,
				info.unit, NV_PMEVCNTR, info.idx, &value);
	else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}

static inline void denver15pmu_write_evtype(int idx, u32 val)
{
	struct dmce_perfmon_cnt_info info;

	val &= DENVER_EVTYPE_EVENT_ID;
	if (denverpmu_counter_valid(idx, &info))
		mce_perfmon_rw(DMCE_PERFMON_COMMAND_WRITE, info.group,
				info.unit, NV_PMEVTYPER, info.idx, &val);
}

static inline int denver15pmu_enable_counter(int idx)
{
	struct dmce_perfmon_cnt_info info;
	u32 data = 0;

	if (denverpmu_counter_valid(idx, &info)) {
		data = BIT(ARMV8_IDX_TO_COUNTER(idx));
		mce_perfmon_rw(DMCE_PERFMON_COMMAND_WRITE, info.group,
				info.unit, NV_PMCNTENSET, 0, &data);
	} else {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	return idx;
}

static inline int denver15pmu_disable_counter(int idx)
{
	u32 data = 0;