diff options
author | Ingo Molnar <mingo@kernel.org> | 2015-02-28 02:03:10 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-02-28 02:03:10 -0500 |
commit | 5838d18955b52467f4b30486e62a31727b39998d (patch) | |
tree | 8aeb8412156bab93a6b39f2de4a8d6c912ddb31a /arch/x86/platform | |
parent | 579deee571a755c485ad702ef82c77a98a2ccc05 (diff) | |
parent | 895c8b7b4623d4f55e260e5dee2574b4f7113105 (diff) |
Merge branch 'linus' into x86/urgent, to merge dependent patch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/platform')
-rw-r--r-- | arch/x86/platform/Makefile | 1 | ||||
-rw-r--r-- | arch/x86/platform/intel-mid/device_libs/Makefile | 2 | ||||
-rw-r--r-- | arch/x86/platform/intel-mid/device_libs/platform_max3111.c | 35 | ||||
-rw-r--r-- | arch/x86/platform/intel-mid/early_printk_intel_mid.c | 220 | ||||
-rw-r--r-- | arch/x86/platform/intel-mid/intel_mid_vrtc.c | 2 | ||||
-rw-r--r-- | arch/x86/platform/intel-quark/Makefile | 2 | ||||
-rw-r--r-- | arch/x86/platform/intel-quark/imr.c | 661 | ||||
-rw-r--r-- | arch/x86/platform/intel-quark/imr_selftest.c | 129 | ||||
-rw-r--r-- | arch/x86/platform/uv/uv_nmi.c | 25 |
9 files changed, 805 insertions, 272 deletions
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile index 85afde1fa3e5..a62e0be3a2f1 100644 --- a/arch/x86/platform/Makefile +++ b/arch/x86/platform/Makefile | |||
@@ -5,6 +5,7 @@ obj-y += geode/ | |||
5 | obj-y += goldfish/ | 5 | obj-y += goldfish/ |
6 | obj-y += iris/ | 6 | obj-y += iris/ |
7 | obj-y += intel-mid/ | 7 | obj-y += intel-mid/ |
8 | obj-y += intel-quark/ | ||
8 | obj-y += olpc/ | 9 | obj-y += olpc/ |
9 | obj-y += scx200/ | 10 | obj-y += scx200/ |
10 | obj-y += sfi/ | 11 | obj-y += sfi/ |
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index af9307f2cc28..91ec9f8704bf 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile | |||
@@ -16,8 +16,6 @@ obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o | |||
16 | obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o | 16 | obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o |
17 | obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o | 17 | obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o |
18 | obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o | 18 | obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o |
19 | # SPI Devices | ||
20 | obj-$(subst m,y,$(CONFIG_SERIAL_MRST_MAX3110)) += platform_max3111.o | ||
21 | # MISC Devices | 19 | # MISC Devices |
22 | obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o | 20 | obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o |
23 | obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o | 21 | obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c deleted file mode 100644 index afd1df94e0e5..000000000000 --- a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c +++ /dev/null | |||
@@ -1,35 +0,0 @@ | |||
1 | /* | ||
2 | * platform_max3111.c: max3111 platform data initialization file | ||
3 | * | ||
4 | * (C) Copyright 2013 Intel Corporation | ||
5 | * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | */ | ||
12 | |||
13 | #include <linux/gpio.h> | ||
14 | #include <linux/spi/spi.h> | ||
15 | #include <asm/intel-mid.h> | ||
16 | |||
17 | static void __init *max3111_platform_data(void *info) | ||
18 | { | ||
19 | struct spi_board_info *spi_info = info; | ||
20 | int intr = get_gpio_by_name("max3111_int"); | ||
21 | |||
22 | spi_info->mode = SPI_MODE_0; | ||
23 | if (intr == -1) | ||
24 | return NULL; | ||
25 | spi_info->irq = intr + INTEL_MID_IRQ_OFFSET; | ||
26 | return NULL; | ||
27 | } | ||
28 | |||
29 | static const struct devs_id max3111_dev_id __initconst = { | ||
30 | .name = "spi_max3111", | ||
31 | .type = SFI_DEV_TYPE_SPI, | ||
32 | .get_platform_data = &max3111_platform_data, | ||
33 | }; | ||
34 | |||
35 | sfi_device(max3111_dev_id); | ||
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c index e0bd082a80e0..4e720829ab90 100644 --- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c +++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c | |||
@@ -10,15 +10,13 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * This file implements two early consoles named mrst and hsu. | 13 | * This file implements early console named hsu. |
14 | * mrst is based on Maxim3110 spi-uart device, it exists in both | 14 | * hsu is based on a High Speed UART device which only exists in the Medfield |
15 | * Moorestown and Medfield platforms, while hsu is based on a High | 15 | * platform |
16 | * Speed UART device which only exists in the Medfield platform | ||
17 | */ | 16 | */ |
18 | 17 | ||
19 | #include <linux/serial_reg.h> | 18 | #include <linux/serial_reg.h> |
20 | #include <linux/serial_mfd.h> | 19 | #include <linux/serial_mfd.h> |
21 | #include <linux/kmsg_dump.h> | ||
22 | #include <linux/console.h> | 20 | #include <linux/console.h> |
23 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
24 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
@@ -28,216 +26,6 @@ | |||
28 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
29 | #include <asm/intel-mid.h> | 27 | #include <asm/intel-mid.h> |
30 | 28 | ||
31 | #define MRST_SPI_TIMEOUT 0x200000 | ||
32 | #define MRST_REGBASE_SPI0 0xff128000 | ||
33 | #define MRST_REGBASE_SPI1 0xff128400 | ||
34 | #define MRST_CLK_SPI0_REG 0xff11d86c | ||
35 | |||
36 | /* Bit fields in CTRLR0 */ | ||
37 | #define SPI_DFS_OFFSET 0 | ||
38 | |||
39 | #define SPI_FRF_OFFSET 4 | ||
40 | #define SPI_FRF_SPI 0x0 | ||
41 | #define SPI_FRF_SSP 0x1 | ||
42 | #define SPI_FRF_MICROWIRE 0x2 | ||
43 | #define SPI_FRF_RESV 0x3 | ||
44 | |||
45 | #define SPI_MODE_OFFSET 6 | ||
46 | #define SPI_SCPH_OFFSET 6 | ||
47 | #define SPI_SCOL_OFFSET 7 | ||
48 | #define SPI_TMOD_OFFSET 8 | ||
49 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | ||
50 | #define SPI_TMOD_TO 0x1 /* xmit only */ | ||
51 | #define SPI_TMOD_RO 0x2 /* recv only */ | ||
52 | #define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ | ||
53 | |||
54 | #define SPI_SLVOE_OFFSET 10 | ||
55 | #define SPI_SRL_OFFSET 11 | ||
56 | #define SPI_CFS_OFFSET 12 | ||
57 | |||
58 | /* Bit fields in SR, 7 bits */ | ||
59 | #define SR_MASK 0x7f /* cover 7 bits */ | ||
60 | #define SR_BUSY (1 << 0) | ||
61 | #define SR_TF_NOT_FULL (1 << 1) | ||
62 | #define SR_TF_EMPT (1 << 2) | ||
63 | #define SR_RF_NOT_EMPT (1 << 3) | ||
64 | #define SR_RF_FULL (1 << 4) | ||
65 | #define SR_TX_ERR (1 << 5) | ||
66 | #define SR_DCOL (1 << 6) | ||
67 | |||
68 | struct dw_spi_reg { | ||
69 | u32 ctrl0; | ||
70 | u32 ctrl1; | ||
71 | u32 ssienr; | ||
72 | u32 mwcr; | ||
73 | u32 ser; | ||
74 | u32 baudr; | ||
75 | u32 txfltr; | ||
76 | u32 rxfltr; | ||
77 | u32 txflr; | ||
78 | u32 rxflr; | ||
79 | u32 sr; | ||
80 | u32 imr; | ||
81 | u32 isr; | ||
82 | u32 risr; | ||
83 | u32 txoicr; | ||
84 | u32 rxoicr; | ||
85 | u32 rxuicr; | ||
86 | u32 msticr; | ||
87 | u32 icr; | ||
88 | u32 dmacr; | ||
89 | u32 dmatdlr; | ||
90 | u32 dmardlr; | ||
91 | u32 idr; | ||
92 | u32 version; | ||
93 | |||
94 | /* Currently operates as 32 bits, though only the low 16 bits matter */ | ||
95 | u32 dr; | ||
96 | } __packed; | ||
97 | |||
98 | #define dw_readl(dw, name) __raw_readl(&(dw)->name) | ||
99 | #define dw_writel(dw, name, val) __raw_writel((val), &(dw)->name) | ||
100 | |||
101 | /* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */ | ||
102 | static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0; | ||
103 | |||
104 | static u32 *pclk_spi0; | ||
105 | /* Always contains an accessible address, start with 0 */ | ||
106 | static struct dw_spi_reg *pspi; | ||
107 | |||
108 | static struct kmsg_dumper dw_dumper; | ||
109 | static int dumper_registered; | ||
110 | |||
111 | static void dw_kmsg_dump(struct kmsg_dumper *dumper, | ||
112 | enum kmsg_dump_reason reason) | ||
113 | { | ||
114 | static char line[1024]; | ||
115 | size_t len; | ||
116 | |||
117 | /* When run to this, we'd better re-init the HW */ | ||
118 | mrst_early_console_init(); | ||
119 | |||
120 | while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) | ||
121 | early_mrst_console.write(&early_mrst_console, line, len); | ||
122 | } | ||
123 | |||
124 | /* Set the ratio rate to 115200, 8n1, IRQ disabled */ | ||
125 | static void max3110_write_config(void) | ||
126 | { | ||
127 | u16 config; | ||
128 | |||
129 | config = 0xc001; | ||
130 | dw_writel(pspi, dr, config); | ||
131 | } | ||
132 | |||
133 | /* Translate char to an eligible word and send to max3110 */ | ||
134 | static void max3110_write_data(char c) | ||
135 | { | ||
136 | u16 data; | ||
137 | |||
138 | data = 0x8000 | c; | ||
139 | dw_writel(pspi, dr, data); | ||
140 | } | ||
141 | |||
142 | void mrst_early_console_init(void) | ||
143 | { | ||
144 | u32 ctrlr0 = 0; | ||
145 | u32 spi0_cdiv; | ||
146 | u32 freq; /* Frequency info only need be searched once */ | ||
147 | |||
148 | /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */ | ||
149 | pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, | ||
150 | MRST_CLK_SPI0_REG); | ||
151 | spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9; | ||
152 | freq = 100000000 / (spi0_cdiv + 1); | ||
153 | |||
154 | if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL) | ||
155 | mrst_spi_paddr = MRST_REGBASE_SPI1; | ||
156 | |||
157 | pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, | ||
158 | mrst_spi_paddr); | ||
159 | |||
160 | /* Disable SPI controller */ | ||
161 | dw_writel(pspi, ssienr, 0); | ||
162 | |||
163 | /* Set control param, 8 bits, transmit only mode */ | ||
164 | ctrlr0 = dw_readl(pspi, ctrl0); | ||
165 | |||
166 | ctrlr0 &= 0xfcc0; | ||
167 | ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET) | ||
168 | | (SPI_TMOD_TO << SPI_TMOD_OFFSET); | ||
169 | dw_writel(pspi, ctrl0, ctrlr0); | ||
170 | |||
171 | /* | ||
172 | * Change the spi0 clk to comply with 115200 bps, use 100000 to | ||
173 | * calculate the clk divisor to make the clock a little slower | ||
174 | * than real baud rate. | ||
175 | */ | ||
176 | dw_writel(pspi, baudr, freq/100000); | ||
177 | |||
178 | /* Disable all INT for early phase */ | ||
179 | dw_writel(pspi, imr, 0x0); | ||
180 | |||
181 | /* Set the cs to spi-uart */ | ||
182 | dw_writel(pspi, ser, 0x2); | ||
183 | |||
184 | /* Enable the HW, the last step for HW init */ | ||
185 | dw_writel(pspi, ssienr, 0x1); | ||
186 | |||
187 | /* Set the default configuration */ | ||
188 | max3110_write_config(); | ||
189 | |||
190 | /* Register the kmsg dumper */ | ||
191 | if (!dumper_registered) { | ||
192 | dw_dumper.dump = dw_kmsg_dump; | ||
193 | kmsg_dump_register(&dw_dumper); | ||
194 | dumper_registered = 1; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /* Slave select should be called in the read/write function */ | ||
199 | static void early_mrst_spi_putc(char c) | ||
200 | { | ||
201 | unsigned int timeout; | ||
202 | u32 sr; | ||
203 | |||
204 | timeout = MRST_SPI_TIMEOUT; | ||
205 | /* Early putc needs to make sure the TX FIFO is not full */ | ||
206 | while (--timeout) { | ||
207 | sr = dw_readl(pspi, sr); | ||
208 | if (!(sr & SR_TF_NOT_FULL)) | ||
209 | cpu_relax(); | ||
210 | else | ||
211 | break; | ||
212 | } | ||
213 | |||
214 | if (!timeout) | ||
215 | pr_warn("MRST earlycon: timed out\n"); | ||
216 | else | ||
217 | max3110_write_data(c); | ||
218 | } | ||
219 | |||
220 | /* Early SPI only uses polling mode */ | ||
221 | static void early_mrst_spi_write(struct console *con, const char *str, | ||
222 | unsigned n) | ||
223 | { | ||
224 | int i; | ||
225 | |||
226 | for (i = 0; i < n && *str; i++) { | ||
227 | if (*str == '\n') | ||
228 | early_mrst_spi_putc('\r'); | ||
229 | early_mrst_spi_putc(*str); | ||
230 | str++; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | struct console early_mrst_console = { | ||
235 | .name = "earlymrst", | ||
236 | .write = early_mrst_spi_write, | ||
237 | .flags = CON_PRINTBUFFER, | ||
238 | .index = -1, | ||
239 | }; | ||
240 | |||
241 | /* | 29 | /* |
242 | * Following is the early console based on Medfield HSU (High | 30 | * Following is the early console based on Medfield HSU (High |
243 | * Speed UART) device. | 31 | * Speed UART) device. |
@@ -259,7 +47,7 @@ void hsu_early_console_init(const char *s) | |||
259 | port = clamp_val(port, 0, 2); | 47 | port = clamp_val(port, 0, 2); |
260 | 48 | ||
261 | paddr = HSU_PORT_BASE + port * 0x80; | 49 | paddr = HSU_PORT_BASE + port * 0x80; |
262 | phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr); | 50 | phsu = (void __iomem *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr); |
263 | 51 | ||
264 | /* Disable FIFO */ | 52 | /* Disable FIFO */ |
265 | writeb(0x0, phsu + UART_FCR); | 53 | writeb(0x0, phsu + UART_FCR); |
diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c index 4762cff7facd..32947ba0f62d 100644 --- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c +++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c | |||
@@ -110,7 +110,7 @@ int vrtc_set_mmss(const struct timespec *now) | |||
110 | spin_unlock_irqrestore(&rtc_lock, flags); | 110 | spin_unlock_irqrestore(&rtc_lock, flags); |
111 | } else { | 111 | } else { |
112 | pr_err("%s: Invalid vRTC value: write of %lx to vRTC failed\n", | 112 | pr_err("%s: Invalid vRTC value: write of %lx to vRTC failed\n", |
113 | __FUNCTION__, now->tv_sec); | 113 | __func__, now->tv_sec); |
114 | retval = -EINVAL; | 114 | retval = -EINVAL; |
115 | } | 115 | } |
116 | return retval; | 116 | return retval; |
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile new file mode 100644 index 000000000000..9cc57ed36022 --- /dev/null +++ b/arch/x86/platform/intel-quark/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IMR) += imr.o | ||
2 | obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o | ||
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c new file mode 100644 index 000000000000..0ee619f9fcb7 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr.c | |||
@@ -0,0 +1,661 @@ | |||
1 | /** | ||
2 | * imr.c | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> | ||
6 | * | ||
7 | * IMR registers define an isolated region of memory that can | ||
8 | * be masked to prohibit certain system agents from accessing memory. | ||
9 | * When a device behind a masked port performs an access - snooped or | ||
10 | * not, an IMR may optionally prevent that transaction from changing | ||
11 | * the state of memory or from getting correct data in response to the | ||
12 | * operation. | ||
13 | * | ||
14 | * Write data will be dropped and reads will return 0xFFFFFFFF, the | ||
15 | * system will reset and system BIOS will print out an error message to | ||
16 | * inform the user that an IMR has been violated. | ||
17 | * | ||
18 | * This code is based on the Linux MTRR code and reference code from | ||
19 | * Intel's Quark BSP EFI, Linux and grub code. | ||
20 | * | ||
21 | * See quark-x1000-datasheet.pdf for register definitions. | ||
22 | * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf | ||
23 | */ | ||
24 | |||
25 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
26 | |||
27 | #include <asm-generic/sections.h> | ||
28 | #include <asm/cpu_device_id.h> | ||
29 | #include <asm/imr.h> | ||
30 | #include <asm/iosf_mbi.h> | ||
31 | #include <linux/debugfs.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/mm.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/types.h> | ||
36 | |||
37 | struct imr_device { | ||
38 | struct dentry *file; | ||
39 | bool init; | ||
40 | struct mutex lock; | ||
41 | int max_imr; | ||
42 | int reg_base; | ||
43 | }; | ||
44 | |||
45 | static struct imr_device imr_dev; | ||
46 | |||
47 | /* | ||
48 | * IMR read/write mask control registers. | ||
49 | * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for | ||
50 | * bit definitions. | ||
51 | * | ||
52 | * addr_lo | ||
53 | * 31 Lock bit | ||
54 | * 30:24 Reserved | ||
55 | * 23:2 1 KiB aligned lo address | ||
56 | * 1:0 Reserved | ||
57 | * | ||
58 | * addr_hi | ||
59 | * 31:24 Reserved | ||
60 | * 23:2 1 KiB aligned hi address | ||
61 | * 1:0 Reserved | ||
62 | */ | ||
63 | #define IMR_LOCK BIT(31) | ||
64 | |||
65 | struct imr_regs { | ||
66 | u32 addr_lo; | ||
67 | u32 addr_hi; | ||
68 | u32 rmask; | ||
69 | u32 wmask; | ||
70 | }; | ||
71 | |||
72 | #define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32)) | ||
73 | #define IMR_SHIFT 8 | ||
74 | #define imr_to_phys(x) ((x) << IMR_SHIFT) | ||
75 | #define phys_to_imr(x) ((x) >> IMR_SHIFT) | ||
76 | |||
77 | /** | ||
78 | * imr_is_enabled - true if an IMR is enabled false otherwise. | ||
79 | * | ||
80 | * Determines if an IMR is enabled based on address range and read/write | ||
81 | * mask. An IMR set with an address range set to zero and a read/write | ||
82 | * access mask set to all is considered to be disabled. An IMR in any | ||
83 | * other state - for example set to zero but without read/write access | ||
84 | * all is considered to be enabled. This definition of disabled is how | ||
85 | * firmware switches off an IMR and is maintained in kernel for | ||
86 | * consistency. | ||
87 | * | ||
88 | * @imr: pointer to IMR descriptor. | ||
89 | * @return: true if IMR enabled false if disabled. | ||
90 | */ | ||
91 | static inline int imr_is_enabled(struct imr_regs *imr) | ||
92 | { | ||
93 | return !(imr->rmask == IMR_READ_ACCESS_ALL && | ||
94 | imr->wmask == IMR_WRITE_ACCESS_ALL && | ||
95 | imr_to_phys(imr->addr_lo) == 0 && | ||
96 | imr_to_phys(imr->addr_hi) == 0); | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * imr_read - read an IMR at a given index. | ||
101 | * | ||
102 | * Requires caller to hold imr mutex. | ||
103 | * | ||
104 | * @idev: pointer to imr_device structure. | ||
105 | * @imr_id: IMR entry to read. | ||
106 | * @imr: IMR structure representing address and access masks. | ||
107 | * @return: 0 on success or error code passed from mbi_iosf on failure. | ||
108 | */ | ||
109 | static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) | ||
110 | { | ||
111 | u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; | ||
112 | int ret; | ||
113 | |||
114 | ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, | ||
115 | reg++, &imr->addr_lo); | ||
116 | if (ret) | ||
117 | return ret; | ||
118 | |||
119 | ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, | ||
120 | reg++, &imr->addr_hi); | ||
121 | if (ret) | ||
122 | return ret; | ||
123 | |||
124 | ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, | ||
125 | reg++, &imr->rmask); | ||
126 | if (ret) | ||
127 | return ret; | ||
128 | |||
129 | return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, | ||
130 | reg++, &imr->wmask); | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * imr_write - write an IMR at a given index. | ||
135 | * | ||
136 | * Requires caller to hold imr mutex. | ||
137 | * Note lock bits need to be written independently of address bits. | ||
138 | * | ||
139 | * @idev: pointer to imr_device structure. | ||
140 | * @imr_id: IMR entry to write. | ||
141 | * @imr: IMR structure representing address and access masks. | ||
142 | * @lock: indicates if the IMR lock bit should be applied. | ||
143 | * @return: 0 on success or error code passed from mbi_iosf on failure. | ||
144 | */ | ||
145 | static int imr_write(struct imr_device *idev, u32 imr_id, | ||
146 | struct imr_regs *imr, bool lock) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; | ||
150 | int ret; | ||
151 | |||
152 | local_irq_save(flags); | ||
153 | |||
154 | ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++, | ||
155 | imr->addr_lo); | ||
156 | if (ret) | ||
157 | goto failed; | ||
158 | |||
159 | ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, | ||
160 | reg++, imr->addr_hi); | ||
161 | if (ret) | ||
162 | goto failed; | ||
163 | |||
164 | ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, | ||
165 | reg++, imr->rmask); | ||
166 | if (ret) | ||
167 | goto failed; | ||
168 | |||
169 | ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, | ||
170 | reg++, imr->wmask); | ||
171 | if (ret) | ||
172 | goto failed; | ||
173 | |||
174 | /* Lock bit must be set separately to addr_lo address bits. */ | ||
175 | if (lock) { | ||
176 | imr->addr_lo |= IMR_LOCK; | ||
177 | ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, | ||
178 | reg - IMR_NUM_REGS, imr->addr_lo); | ||
179 | if (ret) | ||
180 | goto failed; | ||
181 | } | ||
182 | |||
183 | local_irq_restore(flags); | ||
184 | return 0; | ||
185 | failed: | ||
186 | /* | ||
187 | * If writing to the IOSF failed then we're in an unknown state, | ||
188 | * likely a very bad state. An IMR in an invalid state will almost | ||
189 | * certainly lead to a memory access violation. | ||
190 | */ | ||
191 | local_irq_restore(flags); | ||
192 | WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n", | ||
193 | imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK); | ||
194 | |||
195 | return ret; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * imr_dbgfs_state_show - print state of IMR registers. | ||
200 | * | ||
201 | * @s: pointer to seq_file for output. | ||
202 | * @unused: unused parameter. | ||
203 | * @return: 0 on success or error code passed from mbi_iosf on failure. | ||
204 | */ | ||
205 | static int imr_dbgfs_state_show(struct seq_file *s, void *unused) | ||
206 | { | ||
207 | phys_addr_t base; | ||
208 | phys_addr_t end; | ||
209 | int i; | ||
210 | struct imr_device *idev = s->private; | ||
211 | struct imr_regs imr; | ||
212 | size_t size; | ||
213 | int ret = -ENODEV; | ||
214 | |||
215 | mutex_lock(&idev->lock); | ||
216 | |||
217 | for (i = 0; i < idev->max_imr; i++) { | ||
218 | |||
219 | ret = imr_read(idev, i, &imr); | ||
220 | if (ret) | ||
221 | break; | ||
222 | |||
223 | /* | ||
224 | * Remember to add IMR_ALIGN bytes to size to indicate the | ||
225 | * inherent IMR_ALIGN size bytes contained in the masked away | ||
226 | * lower ten bits. | ||
227 | */ | ||
228 | if (imr_is_enabled(&imr)) { | ||
229 | base = imr_to_phys(imr.addr_lo); | ||
230 | end = imr_to_phys(imr.addr_hi) + IMR_MASK; | ||
231 | } else { | ||
232 | base = 0; | ||
233 | end = 0; | ||
234 | } | ||
235 | size = end - base; | ||
236 | seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx " | ||
237 | "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i, | ||
238 | &base, &end, size, imr.rmask, imr.wmask, | ||
239 | imr_is_enabled(&imr) ? "enabled " : "disabled", | ||
240 | imr.addr_lo & IMR_LOCK ? "locked" : "unlocked"); | ||
241 | } | ||
242 | |||
243 | mutex_unlock(&idev->lock); | ||
244 | return ret; | ||
245 | } | ||
246 | |||
247 | /** | ||
248 | * imr_state_open - debugfs open callback. | ||
249 | * | ||
250 | * @inode: pointer to struct inode. | ||
251 | * @file: pointer to struct file. | ||
252 | * @return: result of single open. | ||
253 | */ | ||
254 | static int imr_state_open(struct inode *inode, struct file *file) | ||
255 | { | ||
256 | return single_open(file, imr_dbgfs_state_show, inode->i_private); | ||
257 | } | ||
258 | |||
259 | static const struct file_operations imr_state_ops = { | ||
260 | .open = imr_state_open, | ||
261 | .read = seq_read, | ||
262 | .llseek = seq_lseek, | ||
263 | .release = single_release, | ||
264 | }; | ||
265 | |||
266 | /** | ||
267 | * imr_debugfs_register - register debugfs hooks. | ||
268 | * | ||
269 | * @idev: pointer to imr_device structure. | ||
270 | * @return: 0 on success - errno on failure. | ||
271 | */ | ||
272 | static int imr_debugfs_register(struct imr_device *idev) | ||
273 | { | ||
274 | idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL, | ||
275 | idev, &imr_state_ops); | ||
276 | return PTR_ERR_OR_ZERO(idev->file); | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * imr_debugfs_unregister - unregister debugfs hooks. | ||
281 | * | ||
282 | * @idev: pointer to imr_device structure. | ||
283 | * @return: | ||
284 | */ | ||
285 | static void imr_debugfs_unregister(struct imr_device *idev) | ||
286 | { | ||
287 | debugfs_remove(idev->file); | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * imr_check_params - check passed address range IMR alignment and non-zero size | ||
292 | * | ||
293 | * @base: base address of intended IMR. | ||
294 | * @size: size of intended IMR. | ||
295 | * @return: zero on valid range -EINVAL on unaligned base/size. | ||
296 | */ | ||
297 | static int imr_check_params(phys_addr_t base, size_t size) | ||
298 | { | ||
299 | if ((base & IMR_MASK) || (size & IMR_MASK)) { | ||
300 | pr_err("base %pa size 0x%08zx must align to 1KiB\n", | ||
301 | &base, size); | ||
302 | return -EINVAL; | ||
303 | } | ||
304 | if (size == 0) | ||
305 | return -EINVAL; | ||
306 | |||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends. | ||
312 | * | ||
313 | * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the | ||
314 | * value in the register. We need to subtract IMR_ALIGN bytes from input sizes | ||
315 | * as a result. | ||
316 | * | ||
317 | * @size: input size bytes. | ||
318 | * @return: reduced size. | ||
319 | */ | ||
320 | static inline size_t imr_raw_size(size_t size) | ||
321 | { | ||
322 | return size - IMR_ALIGN; | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * imr_address_overlap - detects an address overlap. | ||
327 | * | ||
328 | * @addr: address to check against an existing IMR. | ||
329 | * @imr: imr being checked. | ||
330 | * @return: true for overlap false for no overlap. | ||
331 | */ | ||
332 | static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr) | ||
333 | { | ||
334 | return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * imr_add_range - add an Isolated Memory Region. | ||
339 | * | ||
340 | * @base: physical base address of region aligned to 1KiB. | ||
341 | * @size: physical size of region in bytes must be aligned to 1KiB. | ||
342 | * @read_mask: read access mask. | ||
343 | * @write_mask: write access mask. | ||
344 | * @lock: indicates whether or not to permanently lock this region. | ||
345 | * @return: zero on success or negative value indicating error. | ||
346 | */ | ||
347 | int imr_add_range(phys_addr_t base, size_t size, | ||
348 | unsigned int rmask, unsigned int wmask, bool lock) | ||
349 | { | ||
350 | phys_addr_t end; | ||
351 | unsigned int i; | ||
352 | struct imr_device *idev = &imr_dev; | ||
353 | struct imr_regs imr; | ||
354 | size_t raw_size; | ||
355 | int reg; | ||
356 | int ret; | ||
357 | |||
358 | if (WARN_ONCE(idev->init == false, "driver not initialized")) | ||
359 | return -ENODEV; | ||
360 | |||
361 | ret = imr_check_params(base, size); | ||
362 | if (ret) | ||
363 | return ret; | ||
364 | |||
365 | /* Tweak the size value. */ | ||
366 | raw_size = imr_raw_size(size); | ||
367 | end = base + raw_size; | ||
368 | |||
369 | /* | ||
370 | * Check for reserved IMR value common to firmware, kernel and grub | ||
371 | * indicating a disabled IMR. | ||
372 | */ | ||
373 | imr.addr_lo = phys_to_imr(base); | ||
374 | imr.addr_hi = phys_to_imr(end); | ||
375 | imr.rmask = rmask; | ||
376 | imr.wmask = wmask; | ||
377 | if (!imr_is_enabled(&imr)) | ||
378 | return -ENOTSUPP; | ||
379 | |||
380 | mutex_lock(&idev->lock); | ||
381 | |||
382 | /* | ||
383 | * Find a free IMR while checking for an existing overlapping range. | ||
384 | * Note there's no restriction in silicon to prevent IMR overlaps. | ||
385 | * For the sake of simplicity and ease in defining/debugging an IMR | ||
386 | * memory map we exclude IMR overlaps. | ||
387 | */ | ||
388 | reg = -1; | ||
389 | for (i = 0; i < idev->max_imr; i++) { | ||
390 | ret = imr_read(idev, i, &imr); | ||
391 | if (ret) | ||
392 | goto failed; | ||
393 | |||
394 | /* Find overlap @ base or end of requested range. */ | ||
395 | ret = -EINVAL; | ||
396 | if (imr_is_enabled(&imr)) { | ||
397 | if (imr_address_overlap(base, &imr)) | ||
398 | goto failed; | ||
399 | if (imr_address_overlap(end, &imr)) | ||
400 | goto failed; | ||
401 | } else { | ||
402 | reg = i; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | /* Error out if we have no free IMR entries. */ | ||
407 | if (reg == -1) { | ||
408 | ret = -ENOMEM; | ||
409 | goto failed; | ||
410 | } | ||
411 | |||
412 | pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n", | ||
413 | reg, &base, &end, raw_size, rmask, wmask); | ||
414 | |||
415 | /* Enable IMR at specified range and access mask. */ | ||
416 | imr.addr_lo = phys_to_imr(base); | ||
417 | imr.addr_hi = phys_to_imr(end); | ||
418 | imr.rmask = rmask; | ||
419 | imr.wmask = wmask; | ||
420 | |||
421 | ret = imr_write(idev, reg, &imr, lock); | ||
422 | if (ret < 0) { | ||
423 | /* | ||
424 | * In the highly unlikely event iosf_mbi_write failed | ||
425 | * attempt to rollback the IMR setup skipping the trapping | ||
426 | * of further IOSF write failures. | ||
427 | */ | ||
428 | imr.addr_lo = 0; | ||
429 | imr.addr_hi = 0; | ||
430 | imr.rmask = IMR_READ_ACCESS_ALL; | ||
431 | imr.wmask = IMR_WRITE_ACCESS_ALL; | ||
432 | imr_write(idev, reg, &imr, false); | ||
433 | } | ||
434 | failed: | ||
435 | mutex_unlock(&idev->lock); | ||
436 | return ret; | ||
437 | } | ||
438 | EXPORT_SYMBOL_GPL(imr_add_range); | ||
439 | |||
440 | /** | ||
441 | * __imr_remove_range - delete an Isolated Memory Region. | ||
442 | * | ||
443 | * This function allows you to delete an IMR by its index specified by reg or | ||
444 | * by address range specified by base and size respectively. If you specify an | ||
445 | * index on its own the base and size parameters are ignored. | ||
446 | * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored. | ||
447 | * imr_remove_range(-1, base, size); delete IMR from base to base+size. | ||
448 | * | ||
449 | * @reg: imr index to remove. | ||
450 | * @base: physical base address of region aligned to 1 KiB. | ||
451 | * @size: physical size of region in bytes aligned to 1 KiB. | ||
452 | * @return: -EINVAL on invalid range or out of range id | ||
453 | * -ENODEV if reg is valid but no IMR exists or is locked | ||
454 | * 0 on success. | ||
455 | */ | ||
456 | static int __imr_remove_range(int reg, phys_addr_t base, size_t size) | ||
457 | { | ||
458 | phys_addr_t end; | ||
459 | bool found = false; | ||
460 | unsigned int i; | ||
461 | struct imr_device *idev = &imr_dev; | ||
462 | struct imr_regs imr; | ||
463 | size_t raw_size; | ||
464 | int ret = 0; | ||
465 | |||
466 | if (WARN_ONCE(idev->init == false, "driver not initialized")) | ||
467 | return -ENODEV; | ||
468 | |||
469 | /* | ||
470 | * Validate address range if deleting by address, else we are | ||
471 | * deleting by index where base and size will be ignored. | ||
472 | */ | ||
473 | if (reg == -1) { | ||
474 | ret = imr_check_params(base, size); | ||
475 | if (ret) | ||
476 | return ret; | ||
477 | } | ||
478 | |||
479 | /* Tweak the size value. */ | ||
480 | raw_size = imr_raw_size(size); | ||
481 | end = base + raw_size; | ||
482 | |||
483 | mutex_lock(&idev->lock); | ||
484 | |||
485 | if (reg >= 0) { | ||
486 | /* If a specific IMR is given try to use it. */ | ||
487 | ret = imr_read(idev, reg, &imr); | ||
488 | if (ret) | ||
489 | goto failed; | ||
490 | |||
491 | if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) { | ||
492 | ret = -ENODEV; | ||
493 | goto failed; | ||
494 | } | ||
495 | found = true; | ||
496 | } else { | ||
497 | /* Search for match based on address range. */ | ||
498 | for (i = 0; i < idev->max_imr; i++) { | ||
499 | ret = imr_read(idev, i, &imr); | ||
500 | if (ret) | ||
501 | goto failed; | ||
502 | |||
503 | if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) | ||
504 | continue; | ||
505 | |||
506 | if ((imr_to_phys(imr.addr_lo) == base) && | ||
507 | (imr_to_phys(imr.addr_hi) == end)) { | ||
508 | found = true; | ||
509 | reg = i; | ||
510 | break; | ||
511 | } | ||
512 | } | ||
513 | } | ||
514 | |||
515 | if (!found) { | ||
516 | ret = -ENODEV; | ||
517 | goto failed; | ||
518 | } | ||
519 | |||
520 | pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size); | ||
521 | |||
522 | /* Tear down the IMR. */ | ||
523 | imr.addr_lo = 0; | ||
524 | imr.addr_hi = 0; | ||
525 | imr.rmask = IMR_READ_ACCESS_ALL; | ||
526 | imr.wmask = IMR_WRITE_ACCESS_ALL; | ||
527 | |||
528 | ret = imr_write(idev, reg, &imr, false); | ||
529 | |||
530 | failed: | ||
531 | mutex_unlock(&idev->lock); | ||
532 | return ret; | ||
533 | } | ||
534 | |||
/**
 * imr_remove_range - delete an Isolated Memory Region by address
 *
 * This function allows you to delete an IMR by an address range specified
 * by base and size respectively.
 * imr_remove_range(base, size); delete IMR from base to base+size.
 *
 * @base:	physical base address of region aligned to 1 KiB.
 * @size:	physical size of region in bytes aligned to 1 KiB.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if no matching unlocked IMR exists
 *		0 on success.
 */
int imr_remove_range(phys_addr_t base, size_t size)
{
	return __imr_remove_range(-1, base, size);
}
EXPORT_SYMBOL_GPL(imr_remove_range);
553 | |||
/**
 * imr_clear - delete an Isolated Memory Region by index
 *
 * This function allows you to delete an IMR by its index. Useful for
 * initial sanitization of the IMR address map.
 * imr_clear(reg); delete the IMR at index reg.
 *
 * @reg:	imr index to remove.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
static inline int imr_clear(int reg)
{
	return __imr_remove_range(reg, 0, 0);
}
571 | |||
572 | /** | ||
573 | * imr_fixup_memmap - Tear down IMRs used during bootup. | ||
574 | * | ||
575 | * BIOS and Grub both setup IMRs around compressed kernel, initrd memory | ||
576 | * that need to be removed before the kernel hands out one of the IMR | ||
577 | * encased addresses to a downstream DMA agent such as the SD or Ethernet. | ||
578 | * IMRs on Galileo are setup to immediately reset the system on violation. | ||
579 | * As a result if you're running a root filesystem from SD - you'll need | ||
580 | * the boot-time IMRs torn down or you'll find seemingly random resets when | ||
581 | * using your filesystem. | ||
582 | * | ||
583 | * @idev: pointer to imr_device structure. | ||
584 | * @return: | ||
585 | */ | ||
586 | static void __init imr_fixup_memmap(struct imr_device *idev) | ||
587 | { | ||
588 | phys_addr_t base = virt_to_phys(&_text); | ||
589 | size_t size = virt_to_phys(&__end_rodata) - base; | ||
590 | int i; | ||
591 | int ret; | ||
592 | |||
593 | /* Tear down all existing unlocked IMRs. */ | ||
594 | for (i = 0; i < idev->max_imr; i++) | ||
595 | imr_clear(i); | ||
596 | |||
597 | /* | ||
598 | * Setup a locked IMR around the physical extent of the kernel | ||
599 | * from the beginning of the .text secton to the end of the | ||
600 | * .rodata section as one physically contiguous block. | ||
601 | */ | ||
602 | ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); | ||
603 | if (ret < 0) { | ||
604 | pr_err("unable to setup IMR for kernel: (%p - %p)\n", | ||
605 | &_text, &__end_rodata); | ||
606 | } else { | ||
607 | pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n", | ||
608 | size / 1024, &_text, &__end_rodata); | ||
609 | } | ||
610 | |||
611 | } | ||
612 | |||
/* CPU match table: IMR hardware exists only on the Intel Quark SoC X1000. */
static const struct x86_cpu_id imr_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, imr_ids);
618 | |||
619 | /** | ||
620 | * imr_init - entry point for IMR driver. | ||
621 | * | ||
622 | * return: -ENODEV for no IMR support 0 if good to go. | ||
623 | */ | ||
624 | static int __init imr_init(void) | ||
625 | { | ||
626 | struct imr_device *idev = &imr_dev; | ||
627 | int ret; | ||
628 | |||
629 | if (!x86_match_cpu(imr_ids) || !iosf_mbi_available()) | ||
630 | return -ENODEV; | ||
631 | |||
632 | idev->max_imr = QUARK_X1000_IMR_MAX; | ||
633 | idev->reg_base = QUARK_X1000_IMR_REGBASE; | ||
634 | idev->init = true; | ||
635 | |||
636 | mutex_init(&idev->lock); | ||
637 | ret = imr_debugfs_register(idev); | ||
638 | if (ret != 0) | ||
639 | pr_warn("debugfs register failed!\n"); | ||
640 | imr_fixup_memmap(idev); | ||
641 | return 0; | ||
642 | } | ||
643 | |||
/**
 * imr_exit - exit point for IMR code.
 *
 * Deregisters debugfs; deliberately leaves the hardware IMR state as-is
 * so regions set up at init (e.g. the kernel .text/.rodata IMR) persist.
 *
 * return: none.
 */
static void __exit imr_exit(void)
{
	imr_debugfs_unregister(&imr_dev);
}
655 | |||
/* Module registration and boilerplate metadata. */
module_init(imr_init);
module_exit(imr_exit);

MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c new file mode 100644 index 000000000000..c9a0838890e2 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr_selftest.c | |||
@@ -0,0 +1,129 @@ | |||
1 | /** | ||
2 | * imr_selftest.c | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> | ||
6 | * | ||
7 | * IMR self test. The purpose of this module is to run a set of tests on the | ||
 * IMR API to validate its sanity. We check for overlapping, reserved
9 | * addresses and setup/teardown sanity. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <asm-generic/sections.h> | ||
14 | #include <asm/imr.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | #define SELFTEST KBUILD_MODNAME ": " | ||
21 | /** | ||
22 | * imr_self_test_result - Print result string for self test. | ||
23 | * | ||
24 | * @res: result code - true if test passed false otherwise. | ||
25 | * @fmt: format string. | ||
26 | * ... variadic argument list. | ||
27 | */ | ||
28 | static void __init imr_self_test_result(int res, const char *fmt, ...) | ||
29 | { | ||
30 | va_list vlist; | ||
31 | |||
32 | /* Print pass/fail. */ | ||
33 | if (res) | ||
34 | pr_info(SELFTEST "pass "); | ||
35 | else | ||
36 | pr_info(SELFTEST "fail "); | ||
37 | |||
38 | /* Print variable string. */ | ||
39 | va_start(vlist, fmt); | ||
40 | vprintk(fmt, vlist); | ||
41 | va_end(vlist); | ||
42 | |||
43 | /* Optional warning. */ | ||
44 | WARN(res == 0, "test failed"); | ||
45 | } | ||
46 | #undef SELFTEST | ||
47 | |||
48 | /** | ||
49 | * imr_self_test | ||
50 | * | ||
51 | * Verify IMR self_test with some simple tests to verify overlap, | ||
52 | * zero sized allocations and 1 KiB sized areas. | ||
53 | * | ||
54 | */ | ||
55 | static void __init imr_self_test(void) | ||
56 | { | ||
57 | phys_addr_t base = virt_to_phys(&_text); | ||
58 | size_t size = virt_to_phys(&__end_rodata) - base; | ||
59 | const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n"; | ||
60 | int ret; | ||
61 | |||
62 | /* Test zero zero. */ | ||
63 | ret = imr_add_range(0, 0, 0, 0, false); | ||
64 | imr_self_test_result(ret < 0, "zero sized IMR\n"); | ||
65 | |||
66 | /* Test exact overlap. */ | ||
67 | ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); | ||
68 | imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); | ||
69 | |||
70 | /* Test overlap with base inside of existing. */ | ||
71 | base += size - IMR_ALIGN; | ||
72 | ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); | ||
73 | imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); | ||
74 | |||
75 | /* Test overlap with end inside of existing. */ | ||
76 | base -= size + IMR_ALIGN * 2; | ||
77 | ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); | ||
78 | imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); | ||
79 | |||
80 | /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */ | ||
81 | ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL, | ||
82 | IMR_WRITE_ACCESS_ALL, false); | ||
83 | imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n"); | ||
84 | |||
85 | /* Test that a 1 KiB IMR @ zero with CPU only will work. */ | ||
86 | ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false); | ||
87 | imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n"); | ||
88 | if (ret >= 0) { | ||
89 | ret = imr_remove_range(0, IMR_ALIGN); | ||
90 | imr_self_test_result(ret == 0, "teardown - cpu-access\n"); | ||
91 | } | ||
92 | |||
93 | /* Test 2 KiB works. */ | ||
94 | size = IMR_ALIGN * 2; | ||
95 | ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, | ||
96 | IMR_WRITE_ACCESS_ALL, false); | ||
97 | imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n"); | ||
98 | if (ret >= 0) { | ||
99 | ret = imr_remove_range(0, size); | ||
100 | imr_self_test_result(ret == 0, "teardown 2KiB\n"); | ||
101 | } | ||
102 | } | ||
103 | |||
/**
 * imr_self_test_init - entry point for the IMR self-test driver.
 *
 * Runs the self tests once at load time. Individual test failures are
 * reported via imr_self_test_result()/WARN.
 *
 * return: 0 unconditionally.
 */
static int __init imr_self_test_init(void)
{
	imr_self_test();
	return 0;
}
114 | |||
/**
 * imr_self_test_exit - exit point for the IMR self-test code.
 *
 * Intentionally empty: the self test has no state to tear down.
 *
 * return: none.
 */
static void __exit imr_self_test_exit(void)
{
}
123 | |||
/* Module registration and boilerplate metadata for the self-test driver. */
module_init(imr_self_test_init);
module_exit(imr_self_test_exit);

MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index c6b146e67116..7488cafab955 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c | |||
@@ -273,20 +273,6 @@ static inline void uv_clear_nmi(int cpu) | |||
273 | } | 273 | } |
274 | } | 274 | } |
275 | 275 | ||
276 | /* Print non-responding cpus */ | ||
277 | static void uv_nmi_nr_cpus_pr(char *fmt) | ||
278 | { | ||
279 | static char cpu_list[1024]; | ||
280 | int len = sizeof(cpu_list); | ||
281 | int c = cpumask_weight(uv_nmi_cpu_mask); | ||
282 | int n = cpulist_scnprintf(cpu_list, len, uv_nmi_cpu_mask); | ||
283 | |||
284 | if (n >= len-1) | ||
285 | strcpy(&cpu_list[len - 6], "...\n"); | ||
286 | |||
287 | printk(fmt, c, cpu_list); | ||
288 | } | ||
289 | |||
290 | /* Ping non-responding cpus attemping to force them into the NMI handler */ | 276 | /* Ping non-responding cpus attemping to force them into the NMI handler */ |
291 | static void uv_nmi_nr_cpus_ping(void) | 277 | static void uv_nmi_nr_cpus_ping(void) |
292 | { | 278 | { |
@@ -371,16 +357,19 @@ static void uv_nmi_wait(int master) | |||
371 | break; | 357 | break; |
372 | 358 | ||
373 | /* if not all made it in, send IPI NMI to them */ | 359 | /* if not all made it in, send IPI NMI to them */ |
374 | uv_nmi_nr_cpus_pr(KERN_ALERT | 360 | pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n", |
375 | "UV: Sending NMI IPI to %d non-responding CPUs: %s\n"); | 361 | cpumask_weight(uv_nmi_cpu_mask), |
362 | cpumask_pr_args(uv_nmi_cpu_mask)); | ||
363 | |||
376 | uv_nmi_nr_cpus_ping(); | 364 | uv_nmi_nr_cpus_ping(); |
377 | 365 | ||
378 | /* if all cpus are in, then done */ | 366 | /* if all cpus are in, then done */ |
379 | if (!uv_nmi_wait_cpus(0)) | 367 | if (!uv_nmi_wait_cpus(0)) |
380 | break; | 368 | break; |
381 | 369 | ||
382 | uv_nmi_nr_cpus_pr(KERN_ALERT | 370 | pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n", |
383 | "UV: %d CPUs not in NMI loop: %s\n"); | 371 | cpumask_weight(uv_nmi_cpu_mask), |
372 | cpumask_pr_args(uv_nmi_cpu_mask)); | ||
384 | } while (0); | 373 | } while (0); |
385 | 374 | ||
386 | pr_alert("UV: %d of %d CPUs in NMI\n", | 375 | pr_alert("UV: %d of %d CPUs in NMI\n", |