author		Bryan O'Donoghue <pure.logic@nexus-software.ie>	2016-02-22 20:29:59 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-02-23 01:37:23 -0500
commit		c637fa5294cefeda8be73cce20ba6693d22262dc (patch)
tree		e86c2de019da8459d2cccfbcb71ce5e1b58dc3c1
parent		fb86780bf7708cd6553f592a6318f10eda766127 (diff)
x86/platform/intel/quark: Drop IMR lock bit support
Isolated Memory Regions support a lock bit. The lock bit in an IMR prevents
modification of the IMR until the core goes through a warm or cold reset.

The lock bit feature is not useful in the context of the kernel API and is
not really necessary, since modification of IMRs is possible only from
ring zero anyway. This patch drops support for IMR lock bits; it simplifies
the kernel API and removes an unnecessary and needlessly complex feature.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Bryan O'Donoghue <pure.logic@nexus-software.ie>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andriy.shevchenko@linux.intel.com
Cc: boon.leong.ong@intel.com
Cc: paul.gortmaker@windriver.com
Link: http://lkml.kernel.org/r/1456190999-12685-3-git-send-email-pure.logic@nexus-software.ie
Signed-off-by: Ingo Molnar <mingo@kernel.org>
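For illustration, here is a minimal sketch of a call site under the new API.
imr_add_range(), imr_remove_range(), IMR_CPU and IMR_ALIGN come from the
patched code itself; the wrapper function, its name and the use of SZ_4K
are hypothetical:

#include <linux/sizes.h>
#include <asm/imr.h>

/*
 * Hypothetical caller, not part of this patch: protect a 4 KiB,
 * IMR_ALIGN-aligned buffer so that only the CPU may read or write it,
 * then tear the region down again. Before this patch the same call
 * carried a trailing 'lock' argument:
 *
 *	ret = imr_add_range(phys, SZ_4K, IMR_CPU, IMR_CPU, false);
 *
 * With lock bit support dropped, every IMR remains modifiable and can
 * always be removed again via imr_remove_range().
 */
static int example_protect_buffer(phys_addr_t phys)
{
	int ret;

	ret = imr_add_range(phys, SZ_4K, IMR_CPU, IMR_CPU);
	if (ret < 0)
		return ret;

	/* ... the region is now protected from non-CPU agents ... */

	return imr_remove_range(phys, SZ_4K);
}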
-rw-r--r--	arch/x86/include/asm/imr.h			2
-rw-r--r--	arch/x86/platform/intel-quark/imr.c		24
-rw-r--r--	arch/x86/platform/intel-quark/imr_selftest.c	15
3 files changed, 14 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
index cd2ce4068441..ebea2c9d2cdc 100644
--- a/arch/x86/include/asm/imr.h
+++ b/arch/x86/include/asm/imr.h
@@ -53,7 +53,7 @@
 #define IMR_MASK	(IMR_ALIGN - 1)
 
 int imr_add_range(phys_addr_t base, size_t size,
-		  unsigned int rmask, unsigned int wmask, bool lock);
+		  unsigned int rmask, unsigned int wmask);
 
 int imr_remove_range(phys_addr_t base, size_t size);
 
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index 740445a53363..17d6d2296e4d 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -134,11 +134,9 @@ static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
  * @idev:	pointer to imr_device structure.
  * @imr_id:	IMR entry to write.
  * @imr:	IMR structure representing address and access masks.
- * @lock:	indicates if the IMR lock bit should be applied.
  * @return:	0 on success or error code passed from mbi_iosf on failure.
  */
-static int imr_write(struct imr_device *idev, u32 imr_id,
-		     struct imr_regs *imr, bool lock)
+static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
 {
 	unsigned long flags;
 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
@@ -162,15 +160,6 @@ static int imr_write(struct imr_device *idev, u32 imr_id,
 	if (ret)
 		goto failed;
 
-	/* Lock bit must be set separately to addr_lo address bits. */
-	if (lock) {
-		imr->addr_lo |= IMR_LOCK;
-		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE,
-				     reg - IMR_NUM_REGS, imr->addr_lo);
-		if (ret)
-			goto failed;
-	}
-
 	local_irq_restore(flags);
 	return 0;
 failed:
@@ -322,11 +311,10 @@ static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
  * @size:	physical size of region in bytes must be aligned to 1KiB.
  * @read_mask:	read access mask.
  * @write_mask:	write access mask.
- * @lock:	indicates whether or not to permanently lock this region.
  * @return:	zero on success or negative value indicating error.
  */
 int imr_add_range(phys_addr_t base, size_t size,
-		  unsigned int rmask, unsigned int wmask, bool lock)
+		  unsigned int rmask, unsigned int wmask)
 {
 	phys_addr_t end;
 	unsigned int i;
@@ -399,7 +387,7 @@ int imr_add_range(phys_addr_t base, size_t size,
 	imr.rmask = rmask;
 	imr.wmask = wmask;
 
-	ret = imr_write(idev, reg, &imr, lock);
+	ret = imr_write(idev, reg, &imr);
 	if (ret < 0) {
 		/*
 		 * In the highly unlikely event iosf_mbi_write failed
@@ -410,7 +398,7 @@ int imr_add_range(phys_addr_t base, size_t size,
 		imr.addr_hi = 0;
 		imr.rmask = IMR_READ_ACCESS_ALL;
 		imr.wmask = IMR_WRITE_ACCESS_ALL;
-		imr_write(idev, reg, &imr, false);
+		imr_write(idev, reg, &imr);
 	}
 failed:
 	mutex_unlock(&idev->lock);
@@ -506,7 +494,7 @@ static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
 	imr.rmask = IMR_READ_ACCESS_ALL;
 	imr.wmask = IMR_WRITE_ACCESS_ALL;
 
-	ret = imr_write(idev, reg, &imr, false);
+	ret = imr_write(idev, reg, &imr);
 
 failed:
 	mutex_unlock(&idev->lock);
@@ -587,7 +575,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 			size / 1024, start, end);
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
index 0381343a0d3a..f5bad40936ac 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -60,30 +60,30 @@ static void __init imr_self_test(void)
 	int ret;
 
 	/* Test zero zero. */
-	ret = imr_add_range(0, 0, 0, 0, false);
+	ret = imr_add_range(0, 0, 0, 0);
 	imr_self_test_result(ret < 0, "zero sized IMR\n");
 
 	/* Test exact overlap. */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test overlap with base inside of existing. */
 	base += size - IMR_ALIGN;
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test overlap with end inside of existing. */
 	base -= size + IMR_ALIGN * 2;
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
 	ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
-			    IMR_WRITE_ACCESS_ALL, false);
+			    IMR_WRITE_ACCESS_ALL);
 	imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
 
 	/* Test that a 1 KiB IMR @ zero with CPU only will work. */
-	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
 	if (ret >= 0) {
 		ret = imr_remove_range(0, IMR_ALIGN);
@@ -92,8 +92,7 @@ static void __init imr_self_test(void)
 
 	/* Test 2 KiB works. */
 	size = IMR_ALIGN * 2;
-	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
-			    IMR_WRITE_ACCESS_ALL, false);
+	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL);
 	imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
 	if (ret >= 0) {
 		ret = imr_remove_range(0, size);