author		Barry Song <Baohua.Song@csr.com>	2011-09-30 09:43:12 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-10-17 04:11:51 -0400
commit		91c2ebb90b1890abc648ba9dec5608cbc97e1cb9 (patch)
tree		b9f9934aa1b17f3529d2c6423cfacc54bb626724 /arch
parent		8d4e652d1b2539196efaef051956fa29e22e9c10 (diff)
ARM: 7114/1: cache-l2x0: add resume entry for l2 in secure mode
We save the l2x0 registers at the first initialization, and platform code can read them to restore the l2x0 state after wakeup.

Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Barry Song <Baohua.Song@csr.com>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
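For context (not part of the patch): a minimal sketch of how platform resume code might consume the saved state. The mysoc_* names and the firmware helper are hypothetical; only l2x0_saved_regs, outer_resume() and the L2X0_* register offsets come from this patch and the existing header.

/*
 * Illustrative sketch only. Where the PL310 is run in secure mode the
 * non-secure kernel cannot reprogram the controller itself, so the
 * values captured in l2x0_saved_regs are handed to secure firmware.
 */
#include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>

/* Hypothetical secure-monitor helper that programs one L2 register. */
extern void mysoc_smc_l2_write(unsigned long offset, unsigned long val);

static void mysoc_l2_restore_after_wakeup(void)
{
	/*
	 * Secured PL310: ask firmware to reprogram the setup registers
	 * from the values the kernel saved at first initialization.
	 */
	mysoc_smc_l2_write(L2X0_AUX_CTRL, l2x0_saved_regs.aux_ctrl);
	mysoc_smc_l2_write(L2X0_PREFETCH_CTRL, l2x0_saved_regs.prefetch_ctrl);
	mysoc_smc_l2_write(L2X0_CTRL, 1);	/* re-enable the L2 */
}

Platforms where the kernel may touch the controller directly can instead simply call outer_resume(), which dispatches to the pl310_resume()/l2x0_resume() helpers added in the diff below.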
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/hardware/cache-l2x0.h	25
-rw-r--r--	arch/arm/include/asm/outercache.h	7
-rw-r--r--	arch/arm/kernel/asm-offsets.c	12
-rw-r--r--	arch/arm/mm/cache-l2x0.c	129
4 files changed, 163 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index c48cb1e1c46c..434edccdf7f3 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -67,6 +67,13 @@
 #define L2X0_CACHE_ID_PART_MASK	(0xf << 6)
 #define L2X0_CACHE_ID_PART_L210	(1 << 6)
 #define L2X0_CACHE_ID_PART_L310	(3 << 6)
+#define L2X0_CACHE_ID_RTL_MASK	0x3f
+#define L2X0_CACHE_ID_RTL_R0P0	0x0
+#define L2X0_CACHE_ID_RTL_R1P0	0x2
+#define L2X0_CACHE_ID_RTL_R2P0	0x4
+#define L2X0_CACHE_ID_RTL_R3P0	0x5
+#define L2X0_CACHE_ID_RTL_R3P1	0x6
+#define L2X0_CACHE_ID_RTL_R3P2	0x8
 
 #define L2X0_AUX_CTRL_MASK	0xc0000fff
 #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT	0
@@ -96,6 +103,24 @@
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
 extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);
+
+struct l2x0_regs {
+	unsigned long phy_base;
+	unsigned long aux_ctrl;
+	/*
+	 * Whether the following registers need to be saved/restored
+	 * depends on platform
+	 */
+	unsigned long tag_latency;
+	unsigned long data_latency;
+	unsigned long filter_start;
+	unsigned long filter_end;
+	unsigned long prefetch_ctrl;
+	unsigned long pwr_ctrl;
+};
+
+extern struct l2x0_regs l2x0_saved_regs;
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index d8387437ec5a..53426c66352a 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -34,6 +34,7 @@ struct outer_cache_fns {
 	void (*sync)(void);
 #endif
 	void (*set_debug)(unsigned long);
+	void (*resume)(void);
 };
 
 #ifdef CONFIG_OUTER_CACHE
@@ -74,6 +75,12 @@ static inline void outer_disable(void)
 	outer_cache.disable();
 }
 
+static inline void outer_resume(void)
+{
+	if (outer_cache.resume)
+		outer_cache.resume();
+}
+
 #else
 
 static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 16baba2e4369..1429d8989fb9 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -20,6 +20,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/procinfo.h>
+#include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>
 
 /*
@@ -92,6 +93,17 @@ int main(void)
   DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
   BLANK();
+#ifdef CONFIG_CACHE_L2X0
+  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));
+  DEFINE(L2X0_R_AUX_CTRL,	offsetof(struct l2x0_regs, aux_ctrl));
+  DEFINE(L2X0_R_TAG_LATENCY,	offsetof(struct l2x0_regs, tag_latency));
+  DEFINE(L2X0_R_DATA_LATENCY,	offsetof(struct l2x0_regs, data_latency));
+  DEFINE(L2X0_R_FILTER_START,	offsetof(struct l2x0_regs, filter_start));
+  DEFINE(L2X0_R_FILTER_END,	offsetof(struct l2x0_regs, filter_end));
+  DEFINE(L2X0_R_PREFETCH_CTRL,	offsetof(struct l2x0_regs, prefetch_ctrl));
+  DEFINE(L2X0_R_PWR_CTRL,	offsetof(struct l2x0_regs, pwr_ctrl));
+  BLANK();
+#endif
 #ifdef CONFIG_CPU_HAS_ASID
   DEFINE(MM_CONTEXT_ID,	offsetof(struct mm_struct, context.id));
   BLANK();
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 0d85d221d7b0..3f9b9980478e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -33,6 +33,14 @@ static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
 
+struct l2x0_regs l2x0_saved_regs;
+
+struct l2x0_of_data {
+	void (*setup)(const struct device_node *, __u32 *, __u32 *);
+	void (*save)(void);
+	void (*resume)(void);
+};
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
@@ -280,7 +288,7 @@ static void l2x0_disable(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static void __init l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(__u32 cache_id)
 {
 	int lockregs;
 	int i;
@@ -356,6 +364,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	/* l2x0 controller is disabled */
 	writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
+	l2x0_saved_regs.aux_ctrl = aux;
+
 	l2x0_inv_all();
 
 	/* enable L2X0 */
@@ -445,33 +455,132 @@ static void __init pl310_of_setup(const struct device_node *np,
 	}
 }
 
+static void __init pl310_save(void)
+{
+	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+		L2X0_CACHE_ID_RTL_MASK;
+
+	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+		L2X0_TAG_LATENCY_CTRL);
+	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+		L2X0_DATA_LATENCY_CTRL);
+	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+		L2X0_ADDR_FILTER_END);
+	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+		L2X0_ADDR_FILTER_START);
+
+	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+		/*
+		 * From r2p0, there is Prefetch offset/control register
+		 */
+		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+			L2X0_PREFETCH_CTRL);
+		/*
+		 * From r3p0, there is Power control register
+		 */
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+				L2X0_POWER_CTRL);
+	}
+}
+
+static void l2x0_resume(void)
+{
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* restore aux ctrl and enable l2 */
+		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+
+		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+			L2X0_AUX_CTRL);
+
+		l2x0_inv_all();
+
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+	}
+}
+
+static void pl310_resume(void)
+{
+	u32 l2x0_revision;
+
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* restore pl310 setup */
+		writel_relaxed(l2x0_saved_regs.tag_latency,
+			l2x0_base + L2X0_TAG_LATENCY_CTRL);
+		writel_relaxed(l2x0_saved_regs.data_latency,
+			l2x0_base + L2X0_DATA_LATENCY_CTRL);
+		writel_relaxed(l2x0_saved_regs.filter_end,
+			l2x0_base + L2X0_ADDR_FILTER_END);
+		writel_relaxed(l2x0_saved_regs.filter_start,
+			l2x0_base + L2X0_ADDR_FILTER_START);
+
+		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+			L2X0_CACHE_ID_RTL_MASK;
+
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+				l2x0_base + L2X0_PREFETCH_CTRL);
+			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+					l2x0_base + L2X0_POWER_CTRL);
+		}
+	}
+
+	l2x0_resume();
+}
+
+static const struct l2x0_of_data pl310_data = {
+	pl310_of_setup,
+	pl310_save,
+	pl310_resume,
+};
+
+static const struct l2x0_of_data l2x0_data = {
+	l2x0_of_setup,
+	NULL,
+	l2x0_resume,
+};
+
 static const struct of_device_id l2x0_ids[] __initconst = {
-	{ .compatible = "arm,pl310-cache", .data = pl310_of_setup },
-	{ .compatible = "arm,l220-cache", .data = l2x0_of_setup },
-	{ .compatible = "arm,l210-cache", .data = l2x0_of_setup },
+	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
 	{}
 };
 
 int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
 {
 	struct device_node *np;
-	void (*l2_setup)(const struct device_node *np,
-		__u32 *aux_val, __u32 *aux_mask);
+	struct l2x0_of_data *data;
+	struct resource res;
 
 	np = of_find_matching_node(NULL, l2x0_ids);
 	if (!np)
 		return -ENODEV;
-	l2x0_base = of_iomap(np, 0);
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	l2x0_base = ioremap(res.start, resource_size(&res));
 	if (!l2x0_base)
 		return -ENOMEM;
 
+	l2x0_saved_regs.phy_base = res.start;
+
+	data = of_match_node(l2x0_ids, np)->data;
+
 	/* L2 configuration can only be changed if the cache is disabled */
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
-		l2_setup = of_match_node(l2x0_ids, np)->data;
-		if (l2_setup)
-			l2_setup(np, &aux_val, &aux_mask);
+		if (data->setup)
+			data->setup(np, &aux_val, &aux_mask);
 	}
+
+	if (data->save)
+		data->save();
+
 	l2x0_init(l2x0_base, aux_val, aux_mask);
+
+	outer_cache.resume = data->resume;
 	return 0;
 }
 #endif
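A note on the OF-path design above (sketch only, not from the patch itself): the of_device_id .data pointer now carries a per-variant struct l2x0_of_data dispatch table instead of a bare setup callback, so a further cache variant only needs one more table and one more match entry. The "vendor,l2foo-cache" string and the l2foo_* callbacks below are hypothetical placeholders.

static const struct l2x0_of_data l2foo_data = {
	l2foo_of_setup,	/* parse DT properties into aux_val/aux_mask */
	l2foo_save,	/* snapshot setup registers into l2x0_saved_regs */
	l2foo_resume,	/* reprogram and re-enable the cache after wakeup */
};

/* ...plus one extra entry in l2x0_ids[]: */
/*	{ .compatible = "vendor,l2foo-cache", .data = (void *)&l2foo_data }, */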