aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/platform
diff options
context:
space:
mode:
authorBryan O'Donoghue <pure.logic@nexus-software.ie>2015-01-30 11:29:38 -0500
committerIngo Molnar <mingo@kernel.org>2015-02-18 17:22:47 -0500
commit28a375df16c2b6d01227541f3956568995aa5fda (patch)
tree1936afb9d558126e32959ddd4900c2abbad4cc74 /arch/x86/platform
parente07e0d4cb0c4bfe822ec8491cc06269096a38bea (diff)
x86/intel/quark: Add Isolated Memory Regions for Quark X1000
Intel's Quark X1000 SoC contains a set of registers called Isolated Memory Regions. IMRs are accessed over the IOSF mailbox interface. IMRs are areas carved out of memory that define read/write access rights to the various system agents within the Quark system. For a given agent in the system it is possible to specify if that agent may read or write an area of memory defined by an IMR with a granularity of 1 KiB. Quark_SecureBootPRM_330234_001.pdf section 4.5 details the concept of IMRs quark-x1000-datasheet.pdf section 12.7.4 details the implementation of IMRs in silicon. eSRAM flush, CPU Snoop write-only, CPU SMM Mode, CPU non-SMM mode, RMU and PCIe Virtual Channels (VC0 and VC1) can have individual read/write access masks applied to them for a given memory region in Quark X1000. This enables IMRs to treat each memory transaction type listed above on an individual basis and to filter appropriately based on the IMR access mask for the memory region. Quark supports eight IMRs. Since all of the DMA capable SoC components in the X1000 are mapped to VC0 it is possible to define sections of memory as invalid for DMA write operations originating from Ethernet, USB, SD and any other DMA capable south-cluster component on VC0. Similarly it is possible to mark kernel memory as non-SMM mode read/write only or to mark BIOS runtime memory as SMM mode accessible only depending on the particular memory footprint on a given system. On an IMR violation Quark SoC X1000 systems are configured to reset the system, so ensuring that the IMR memory map is consistent with the EFI provided memory map is critical to ensure no IMR violations reset the system. The API for accessing IMRs is based on MTRR code but doesn't provide a /proc or /sys interface to manipulate IMRs. Defining the size and extent of IMRs is exclusively the domain of in-kernel code. Quark firmware sets up a series of locked IMRs around pieces of memory that firmware owns such as ACPI runtime data. 
During boot a series of unlocked IMRs are placed around items in memory to guarantee no DMA modification of those items can take place. Grub also places an unlocked IMR around the kernel boot params data structure and compressed kernel image. It is necessary for the kernel to tear down all unlocked IMRs in order to ensure that the kernel's view of memory passed via the EFI memory map is consistent with the IMR memory map. Without tearing down all unlocked IMRs on boot transitory IMRs such as those used to protect the compressed kernel image will cause IMR violations and system reboots. The IMR init code tears down all unlocked IMRs and sets a protective IMR around the kernel .text and .rodata as one contiguous block. This sanitizes the IMR memory map with respect to the EFI memory map and protects the read-only portions of the kernel from unwarranted DMA access. Tested-by: Ong, Boon Leong <boon.leong.ong@intel.com> Signed-off-by: Bryan O'Donoghue <pure.logic@nexus-software.ie> Reviewed-by: Andy Shevchenko <andy.schevchenko@gmail.com> Reviewed-by: Darren Hart <dvhart@linux.intel.com> Reviewed-by: Ong, Boon Leong <boon.leong.ong@intel.com> Cc: andy.shevchenko@gmail.com Cc: dvhart@infradead.org Link: http://lkml.kernel.org/r/1422635379-12476-2-git-send-email-pure.logic@nexus-software.ie Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--arch/x86/platform/intel-quark/Makefile2
-rw-r--r--arch/x86/platform/intel-quark/imr.c668
-rw-r--r--arch/x86/platform/intel-quark/imr_selftest.c129
3 files changed, 799 insertions, 0 deletions
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644
index 000000000000..9cc57ed36022
--- /dev/null
+++ b/arch/x86/platform/intel-quark/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_INTEL_IMR) += imr.o
2obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644
index 000000000000..16e4df1c9290
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -0,0 +1,668 @@
1/**
2 * imr.c
3 *
4 * Copyright(c) 2013 Intel Corporation.
5 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
6 *
7 * IMR registers define an isolated region of memory that can
8 * be masked to prohibit certain system agents from accessing memory.
9 * When a device behind a masked port performs an access - snooped or
10 * not, an IMR may optionally prevent that transaction from changing
11 * the state of memory or from getting correct data in response to the
12 * operation.
13 *
14 * Write data will be dropped and reads will return 0xFFFFFFFF, the
15 * system will reset and system BIOS will print out an error message to
16 * inform the user that an IMR has been violated.
17 *
18 * This code is based on the Linux MTRR code and reference code from
19 * Intel's Quark BSP EFI, Linux and grub code.
20 *
21 * See quark-x1000-datasheet.pdf for register definitions.
22 * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
23 */
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <asm-generic/sections.h>
28#include <asm/cpu_device_id.h>
29#include <asm/imr.h>
30#include <asm/iosf_mbi.h>
31#include <linux/debugfs.h>
32#include <linux/init.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/types.h>
36
37struct imr_device {
38 struct dentry *file;
39 bool init;
40 struct mutex lock;
41 int max_imr;
42 int reg_base;
43};
44
45static struct imr_device imr_dev;
46
47/*
48 * IMR read/write mask control registers.
49 * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
50 * bit definitions.
51 *
 52 * addr_lo
53 * 31 Lock bit
54 * 30:24 Reserved
55 * 23:2 1 KiB aligned lo address
56 * 1:0 Reserved
57 *
58 * addr_hi
59 * 31:24 Reserved
60 * 23:2 1 KiB aligned hi address
61 * 1:0 Reserved
62 */
63#define IMR_LOCK BIT(31)
64
65struct imr_regs {
66 u32 addr_lo;
67 u32 addr_hi;
68 u32 rmask;
69 u32 wmask;
70};
71
72#define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32))
73#define IMR_SHIFT 8
74#define imr_to_phys(x) ((x) << IMR_SHIFT)
75#define phys_to_imr(x) ((x) >> IMR_SHIFT)
76
77/**
78 * imr_is_enabled - true if an IMR is enabled false otherwise.
79 *
80 * Determines if an IMR is enabled based on address range and read/write
81 * mask. An IMR set with an address range set to zero and a read/write
82 * access mask set to all is considered to be disabled. An IMR in any
83 * other state - for example set to zero but without read/write access
84 * all is considered to be enabled. This definition of disabled is how
85 * firmware switches off an IMR and is maintained in kernel for
86 * consistency.
87 *
88 * @imr: pointer to IMR descriptor.
89 * @return: true if IMR enabled false if disabled.
90 */
91static inline int imr_is_enabled(struct imr_regs *imr)
92{
93 return !(imr->rmask == IMR_READ_ACCESS_ALL &&
94 imr->wmask == IMR_WRITE_ACCESS_ALL &&
95 imr_to_phys(imr->addr_lo) == 0 &&
96 imr_to_phys(imr->addr_hi) == 0);
97}
98
99/**
100 * imr_read - read an IMR at a given index.
101 *
102 * Requires caller to hold imr mutex.
103 *
104 * @idev: pointer to imr_device structure.
105 * @imr_id: IMR entry to read.
106 * @imr: IMR structure representing address and access masks.
107 * @return: 0 on success or error code passed from mbi_iosf on failure.
108 */
109static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
110{
111 u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
112 int ret;
113
114 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
115 reg++, &imr->addr_lo);
116 if (ret)
117 return ret;
118
119 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
120 reg++, &imr->addr_hi);
121 if (ret)
122 return ret;
123
124 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
125 reg++, &imr->rmask);
126 if (ret)
127 return ret;
128
129 ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
130 reg++, &imr->wmask);
131 if (ret)
132 return ret;
133
134 return 0;
135}
136
137/**
138 * imr_write - write an IMR at a given index.
139 *
140 * Requires caller to hold imr mutex.
141 * Note lock bits need to be written independently of address bits.
142 *
143 * @idev: pointer to imr_device structure.
144 * @imr_id: IMR entry to write.
145 * @imr: IMR structure representing address and access masks.
146 * @lock: indicates if the IMR lock bit should be applied.
147 * @return: 0 on success or error code passed from mbi_iosf on failure.
148 */
149static int imr_write(struct imr_device *idev, u32 imr_id,
150 struct imr_regs *imr, bool lock)
151{
152 unsigned long flags;
153 u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
154 int ret;
155
156 local_irq_save(flags);
157
158 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
159 imr->addr_lo);
160 if (ret)
161 goto failed;
162
163 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
164 reg++, imr->addr_hi);
165 if (ret)
166 goto failed;
167
168 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
169 reg++, imr->rmask);
170 if (ret)
171 goto failed;
172
173 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
174 reg++, imr->wmask);
175 if (ret)
176 goto failed;
177
178 /* Lock bit must be set separately to addr_lo address bits. */
179 if (lock) {
180 imr->addr_lo |= IMR_LOCK;
181 ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
182 reg - IMR_NUM_REGS, imr->addr_lo);
183 if (ret)
184 goto failed;
185 }
186
187 local_irq_restore(flags);
188 return 0;
189failed:
190 /*
191 * If writing to the IOSF failed then we're in an unknown state,
192 * likely a very bad state. An IMR in an invalid state will almost
193 * certainly lead to a memory access violation.
194 */
195 local_irq_restore(flags);
196 WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
197 imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
198
199 return ret;
200}
201
202/**
203 * imr_dbgfs_state_show - print state of IMR registers.
204 *
205 * @s: pointer to seq_file for output.
206 * @unused: unused parameter.
207 * @return: 0 on success or error code passed from mbi_iosf on failure.
208 */
209static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
210{
211 phys_addr_t base;
212 phys_addr_t end;
213 int i;
214 struct imr_device *idev = s->private;
215 struct imr_regs imr;
216 size_t size;
217 int ret = -ENODEV;
218
219 mutex_lock(&idev->lock);
220
221 for (i = 0; i < idev->max_imr; i++) {
222
223 ret = imr_read(idev, i, &imr);
224 if (ret)
225 break;
226
227 /*
228 * Remember to add IMR_ALIGN bytes to size to indicate the
229 * inherent IMR_ALIGN size bytes contained in the masked away
230 * lower ten bits.
231 */
232 if (imr_is_enabled(&imr)) {
233 base = imr_to_phys(imr.addr_lo);
234 end = imr_to_phys(imr.addr_hi) + IMR_MASK;
235 } else {
236 base = 0;
237 end = 0;
238 }
239 size = end - base;
240 seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
241 "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
242 &base, &end, size, imr.rmask, imr.wmask,
243 imr_is_enabled(&imr) ? "enabled " : "disabled",
244 imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
245 }
246
247 mutex_unlock(&idev->lock);
248 return ret;
249}
250
251/**
252 * imr_state_open - debugfs open callback.
253 *
254 * @inode: pointer to struct inode.
255 * @file: pointer to struct file.
256 * @return: result of single open.
257 */
258static int imr_state_open(struct inode *inode, struct file *file)
259{
260 return single_open(file, imr_dbgfs_state_show, inode->i_private);
261}
262
263static const struct file_operations imr_state_ops = {
264 .open = imr_state_open,
265 .read = seq_read,
266 .llseek = seq_lseek,
267 .release = single_release,
268};
269
270/**
271 * imr_debugfs_register - register debugfs hooks.
272 *
273 * @idev: pointer to imr_device structure.
274 * @return: 0 on success - errno on failure.
275 */
276static int imr_debugfs_register(struct imr_device *idev)
277{
278 idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
279 idev, &imr_state_ops);
280 if (IS_ERR(idev->file))
281 return PTR_ERR(idev->file);
282
283 return 0;
284}
285
286/**
287 * imr_debugfs_unregister - unregister debugfs hooks.
288 *
289 * @idev: pointer to imr_device structure.
290 * @return:
291 */
292static void imr_debugfs_unregister(struct imr_device *idev)
293{
294 debugfs_remove(idev->file);
295}
296
297/**
298 * imr_check_params - check passed address range IMR alignment and non-zero size
299 *
300 * @base: base address of intended IMR.
301 * @size: size of intended IMR.
302 * @return: zero on valid range -EINVAL on unaligned base/size.
303 */
304static int imr_check_params(phys_addr_t base, size_t size)
305{
306 if ((base & IMR_MASK) || (size & IMR_MASK)) {
307 pr_err("base %pa size 0x%08zx must align to 1KiB\n",
308 &base, size);
309 return -EINVAL;
310 }
311 if (size == 0)
312 return -EINVAL;
313
314 return 0;
315}
316
317/**
318 * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
319 *
320 * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
321 * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
322 * as a result.
323 *
324 * @size: input size bytes.
325 * @return: reduced size.
326 */
327static inline size_t imr_raw_size(size_t size)
328{
329 return size - IMR_ALIGN;
330}
331
332/**
333 * imr_address_overlap - detects an address overlap.
334 *
335 * @addr: address to check against an existing IMR.
336 * @imr: imr being checked.
337 * @return: true for overlap false for no overlap.
338 */
339static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
340{
341 return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
342}
343
344/**
345 * imr_add_range - add an Isolated Memory Region.
346 *
347 * @base: physical base address of region aligned to 1KiB.
348 * @size: physical size of region in bytes must be aligned to 1KiB.
349 * @read_mask: read access mask.
350 * @write_mask: write access mask.
351 * @lock: indicates whether or not to permanently lock this region.
352 * @return: zero on success or negative value indicating error.
353 */
354int imr_add_range(phys_addr_t base, size_t size,
355 unsigned int rmask, unsigned int wmask, bool lock)
356{
357 phys_addr_t end;
358 unsigned int i;
359 struct imr_device *idev = &imr_dev;
360 struct imr_regs imr;
361 size_t raw_size;
362 int reg;
363 int ret;
364
365 if (WARN_ONCE(idev->init == false, "driver not initialized"))
366 return -ENODEV;
367
368 ret = imr_check_params(base, size);
369 if (ret)
370 return ret;
371
372 /* Tweak the size value. */
373 raw_size = imr_raw_size(size);
374 end = base + raw_size;
375
376 /*
377 * Check for reserved IMR value common to firmware, kernel and grub
378 * indicating a disabled IMR.
379 */
380 imr.addr_lo = phys_to_imr(base);
381 imr.addr_hi = phys_to_imr(end);
382 imr.rmask = rmask;
383 imr.wmask = wmask;
384 if (!imr_is_enabled(&imr))
385 return -ENOTSUPP;
386
387 mutex_lock(&idev->lock);
388
389 /*
390 * Find a free IMR while checking for an existing overlapping range.
391 * Note there's no restriction in silicon to prevent IMR overlaps.
392 * For the sake of simplicity and ease in defining/debugging an IMR
393 * memory map we exclude IMR overlaps.
394 */
395 reg = -1;
396 for (i = 0; i < idev->max_imr; i++) {
397 ret = imr_read(idev, i, &imr);
398 if (ret)
399 goto failed;
400
401 /* Find overlap @ base or end of requested range. */
402 ret = -EINVAL;
403 if (imr_is_enabled(&imr)) {
404 if (imr_address_overlap(base, &imr))
405 goto failed;
406 if (imr_address_overlap(end, &imr))
407 goto failed;
408 } else {
409 reg = i;
410 }
411 }
412
413 /* Error out if we have no free IMR entries. */
414 if (reg == -1) {
415 ret = -ENOMEM;
416 goto failed;
417 }
418
419 pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
420 reg, &base, &end, raw_size, rmask, wmask);
421
422 /* Enable IMR at specified range and access mask. */
423 imr.addr_lo = phys_to_imr(base);
424 imr.addr_hi = phys_to_imr(end);
425 imr.rmask = rmask;
426 imr.wmask = wmask;
427
428 ret = imr_write(idev, reg, &imr, lock);
429 if (ret < 0) {
430 /*
431 * In the highly unlikely event iosf_mbi_write failed
432 * attempt to rollback the IMR setup skipping the trapping
433 * of further IOSF write failures.
434 */
435 imr.addr_lo = 0;
436 imr.addr_hi = 0;
437 imr.rmask = IMR_READ_ACCESS_ALL;
438 imr.wmask = IMR_WRITE_ACCESS_ALL;
439 imr_write(idev, reg, &imr, false);
440 }
441failed:
442 mutex_unlock(&idev->lock);
443 return ret;
444}
445EXPORT_SYMBOL_GPL(imr_add_range);
446
447/**
448 * __imr_remove_range - delete an Isolated Memory Region.
449 *
450 * This function allows you to delete an IMR by its index specified by reg or
451 * by address range specified by base and size respectively. If you specify an
452 * index on its own the base and size parameters are ignored.
453 * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
454 * imr_remove_range(-1, base, size); delete IMR from base to base+size.
455 *
456 * @reg: imr index to remove.
457 * @base: physical base address of region aligned to 1 KiB.
458 * @size: physical size of region in bytes aligned to 1 KiB.
459 * @return: -EINVAL on invalid range or out or range id
460 * -ENODEV if reg is valid but no IMR exists or is locked
461 * 0 on success.
462 */
463static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
464{
465 phys_addr_t end;
466 bool found = false;
467 unsigned int i;
468 struct imr_device *idev = &imr_dev;
469 struct imr_regs imr;
470 size_t raw_size;
471 int ret = 0;
472
473 if (WARN_ONCE(idev->init == false, "driver not initialized"))
474 return -ENODEV;
475
476 /*
477 * Validate address range if deleting by address, else we are
478 * deleting by index where base and size will be ignored.
479 */
480 if (reg == -1) {
481 ret = imr_check_params(base, size);
482 if (ret)
483 return ret;
484 }
485
486 /* Tweak the size value. */
487 raw_size = imr_raw_size(size);
488 end = base + raw_size;
489
490 mutex_lock(&idev->lock);
491
492 if (reg >= 0) {
493 /* If a specific IMR is given try to use it. */
494 ret = imr_read(idev, reg, &imr);
495 if (ret)
496 goto failed;
497
498 if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
499 ret = -ENODEV;
500 goto failed;
501 }
502 found = true;
503 } else {
504 /* Search for match based on address range. */
505 for (i = 0; i < idev->max_imr; i++) {
506 ret = imr_read(idev, i, &imr);
507 if (ret)
508 goto failed;
509
510 if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
511 continue;
512
513 if ((imr_to_phys(imr.addr_lo) == base) &&
514 (imr_to_phys(imr.addr_hi) == end)) {
515 found = true;
516 reg = i;
517 break;
518 }
519 }
520 }
521
522 if (!found) {
523 ret = -ENODEV;
524 goto failed;
525 }
526
527 pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
528
529 /* Tear down the IMR. */
530 imr.addr_lo = 0;
531 imr.addr_hi = 0;
532 imr.rmask = IMR_READ_ACCESS_ALL;
533 imr.wmask = IMR_WRITE_ACCESS_ALL;
534
535 ret = imr_write(idev, reg, &imr, false);
536
537failed:
538 mutex_unlock(&idev->lock);
539 return ret;
540}
541
542/**
543 * imr_remove_range - delete an Isolated Memory Region by address
544 *
545 * This function allows you to delete an IMR by an address range specified
546 * by base and size respectively.
547 * imr_remove_range(base, size); delete IMR from base to base+size.
548 *
549 * @base: physical base address of region aligned to 1 KiB.
550 * @size: physical size of region in bytes aligned to 1 KiB.
551 * @return: -EINVAL on invalid range or out or range id
552 * -ENODEV if reg is valid but no IMR exists or is locked
553 * 0 on success.
554 */
555int imr_remove_range(phys_addr_t base, size_t size)
556{
557 return __imr_remove_range(-1, base, size);
558}
559EXPORT_SYMBOL_GPL(imr_remove_range);
560
/**
 * imr_clear - delete an Isolated Memory Region by index
 *
 * This function allows you to delete an IMR by its index. Useful for
 * initial sanitization of the IMR address map.
 * imr_clear(reg); delete the IMR at index reg.
 *
 * @reg:	imr index to remove.
 * @return:	-EINVAL on invalid range or out or range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
static inline int imr_clear(int reg)
{
	return __imr_remove_range(reg, 0, 0);
}
578
579/**
580 * imr_fixup_memmap - Tear down IMRs used during bootup.
581 *
582 * BIOS and Grub both setup IMRs around compressed kernel, initrd memory
583 * that need to be removed before the kernel hands out one of the IMR
584 * encased addresses to a downstream DMA agent such as the SD or Ethernet.
585 * IMRs on Galileo are setup to immediately reset the system on violation.
586 * As a result if you're running a root filesystem from SD - you'll need
587 * the boot-time IMRs torn down or you'll find seemingly random resets when
588 * using your filesystem.
589 *
590 * @idev: pointer to imr_device structure.
591 * @return:
592 */
593static void __init imr_fixup_memmap(struct imr_device *idev)
594{
595 phys_addr_t base = virt_to_phys(&_text);
596 size_t size = virt_to_phys(&__end_rodata) - base;
597 int i;
598 int ret;
599
600 /* Tear down all existing unlocked IMRs. */
601 for (i = 0; i < idev->max_imr; i++)
602 imr_clear(i);
603
604 /*
605 * Setup a locked IMR around the physical extent of the kernel
606 * from the beginning of the .text secton to the end of the
607 * .rodata section as one physically contiguous block.
608 */
609 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
610 if (ret < 0) {
611 pr_err("unable to setup IMR for kernel: (%p - %p)\n",
612 &_text, &__end_rodata);
613 } else {
614 pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
615 size / 1024, &_text, &__end_rodata);
616 }
617
618}
619
620static const struct x86_cpu_id imr_ids[] __initconst = {
621 { X86_VENDOR_INTEL, 5, 9 }, /* Intel Quark SoC X1000. */
622 {}
623};
624MODULE_DEVICE_TABLE(x86cpu, imr_ids);
625
626/**
627 * imr_init - entry point for IMR driver.
628 *
629 * return: -ENODEV for no IMR support 0 if good to go.
630 */
631static int __init imr_init(void)
632{
633 struct imr_device *idev = &imr_dev;
634 int ret;
635
636 if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
637 return -ENODEV;
638
639 idev->max_imr = QUARK_X1000_IMR_MAX;
640 idev->reg_base = QUARK_X1000_IMR_REGBASE;
641 idev->init = true;
642
643 mutex_init(&idev->lock);
644 ret = imr_debugfs_register(idev);
645 if (ret != 0)
646 pr_warn("debugfs register failed!\n");
647 imr_fixup_memmap(idev);
648 return 0;
649}
650
651/**
652 * imr_exit - exit point for IMR code.
653 *
654 * Deregisters debugfs, leave IMR state as-is.
655 *
656 * return:
657 */
658static void __exit imr_exit(void)
659{
660 imr_debugfs_unregister(&imr_dev);
661}
662
663module_init(imr_init);
664module_exit(imr_exit);
665
666MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
667MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
668MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644
index 000000000000..c9a0838890e2
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -0,0 +1,129 @@
1/**
2 * imr_selftest.c
3 *
4 * Copyright(c) 2013 Intel Corporation.
5 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
6 *
7 * IMR self test. The purpose of this module is to run a set of tests on the
8 * IMR API to validate it's sanity. We check for overlapping, reserved
9 * addresses and setup/teardown sanity.
10 *
11 */
12
13#include <asm-generic/sections.h>
14#include <asm/imr.h>
15#include <linux/init.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/types.h>
19
20#define SELFTEST KBUILD_MODNAME ": "
21/**
22 * imr_self_test_result - Print result string for self test.
23 *
24 * @res: result code - true if test passed false otherwise.
25 * @fmt: format string.
26 * ... variadic argument list.
27 */
28static void __init imr_self_test_result(int res, const char *fmt, ...)
29{
30 va_list vlist;
31
32 /* Print pass/fail. */
33 if (res)
34 pr_info(SELFTEST "pass ");
35 else
36 pr_info(SELFTEST "fail ");
37
38 /* Print variable string. */
39 va_start(vlist, fmt);
40 vprintk(fmt, vlist);
41 va_end(vlist);
42
43 /* Optional warning. */
44 WARN(res == 0, "test failed");
45}
46#undef SELFTEST
47
48/**
49 * imr_self_test
50 *
51 * Verify IMR self_test with some simple tests to verify overlap,
52 * zero sized allocations and 1 KiB sized areas.
53 *
54 */
55static void __init imr_self_test(void)
56{
57 phys_addr_t base = virt_to_phys(&_text);
58 size_t size = virt_to_phys(&__end_rodata) - base;
59 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
60 int ret;
61
62 /* Test zero zero. */
63 ret = imr_add_range(0, 0, 0, 0, false);
64 imr_self_test_result(ret < 0, "zero sized IMR\n");
65
66 /* Test exact overlap. */
67 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
68 imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
69
70 /* Test overlap with base inside of existing. */
71 base += size - IMR_ALIGN;
72 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
73 imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
74
75 /* Test overlap with end inside of existing. */
76 base -= size + IMR_ALIGN * 2;
77 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
78 imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
79
80 /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
81 ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
82 IMR_WRITE_ACCESS_ALL, false);
83 imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
84
85 /* Test that a 1 KiB IMR @ zero with CPU only will work. */
86 ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
87 imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
88 if (ret >= 0) {
89 ret = imr_remove_range(0, IMR_ALIGN);
90 imr_self_test_result(ret == 0, "teardown - cpu-access\n");
91 }
92
93 /* Test 2 KiB works. */
94 size = IMR_ALIGN * 2;
95 ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
96 IMR_WRITE_ACCESS_ALL, false);
97 imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
98 if (ret >= 0) {
99 ret = imr_remove_range(0, size);
100 imr_self_test_result(ret == 0, "teardown 2KiB\n");
101 }
102}
103
104/**
105 * imr_self_test_init - entry point for IMR driver.
106 *
107 * return: -ENODEV for no IMR support 0 if good to go.
108 */
109static int __init imr_self_test_init(void)
110{
111 imr_self_test();
112 return 0;
113}
114
115/**
116 * imr_self_test_exit - exit point for IMR code.
117 *
118 * return:
119 */
120static void __exit imr_self_test_exit(void)
121{
122}
123
124module_init(imr_self_test_init);
125module_exit(imr_self_test_exit);
126
127MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
128MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
129MODULE_LICENSE("Dual BSD/GPL");