aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorVarun Sethi <Varun.Sethi@freescale.com>2013-07-15 00:50:57 -0400
committerJoerg Roedel <joro@8bytes.org>2013-08-14 05:38:34 -0400
commit695093e38c3ef63fcb43a2840ed865efa20671d5 (patch)
treeb4e474176ced242d9bd5d1dadfb08c7cc817e9bd /drivers/iommu
parent7cabf491eb6d52ea7f166b287702890cae0c803d (diff)
iommu/fsl: Freescale PAMU driver and iommu implementation.
Following is a brief description of the PAMU hardware: PAMU determines what action to take and whether to authorize the action on the basis of the memory address, a Logical IO Device Number (LIODN), and PAACT table (logically) indexed by LIODN and address. Hardware devices which need to access memory must provide an LIODN in addition to the memory address. Peripheral Access Authorization and Control Tables (PAACTs) are the primary data structures used by PAMU. A PAACT is a table of peripheral access authorization and control entries (PAACE). Each PAACE defines the range of I/O bus address space that is accessible by the LIOD and the associated access capabilities. There are two types of PAACTs: primary PAACT (PPAACT) and secondary PAACT (SPAACT). A given physical I/O device may be able to act as one or more independent logical I/O devices (LIODs). Each such logical I/O device is assigned an identifier called logical I/O device number (LIODN). A LIODN is allocated a contiguous portion of the I/O bus address space called the DSA window for performing DSA operations. The DSA window may optionally be divided into multiple sub-windows, each of which may be used to map to a region in system storage space. The first sub-window is referred to as the primary sub-window and the remaining are called secondary sub-windows. This patch provides the PAMU driver (fsl_pamu.c) and the corresponding IOMMU API implementation (fsl_pamu_domain.c). The PAMU hardware driver (fsl_pamu.c) has been derived from the work done by Ashish Kalra and Timur Tabi. [For iommu group support] Acked-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Timur Tabi <timur@tabi.org> Signed-off-by: Varun Sethi <Varun.Sethi@freescale.com> Signed-off-by: Joerg Roedel <joro@8bytes.org>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/Kconfig10
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/fsl_pamu.c1309
-rw-r--r--drivers/iommu/fsl_pamu.h410
-rw-r--r--drivers/iommu/fsl_pamu_domain.c1172
-rw-r--r--drivers/iommu/fsl_pamu_domain.h85
6 files changed, 2987 insertions, 0 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 820d85c4a4a0..fe302e33f72e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -17,6 +17,16 @@ config OF_IOMMU
17 def_bool y 17 def_bool y
18 depends on OF 18 depends on OF
19 19
20config FSL_PAMU
21 bool "Freescale IOMMU support"
22 depends on PPC_E500MC
23 select IOMMU_API
24 select GENERIC_ALLOCATOR
25 help
26 Freescale PAMU support. PAMU is the IOMMU present on Freescale QorIQ platforms.
27 PAMU can authorize memory access, remap the memory address, and remap I/O
28 transaction types.
29
20# MSM IOMMU support 30# MSM IOMMU support
21config MSM_IOMMU 31config MSM_IOMMU
22 bool "MSM IOMMU Support" 32 bool "MSM IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index bbe7041212dd..14c1f474cf11 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
16obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o 16obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
17obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o 17obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
18obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o 18obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
19obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
new file mode 100644
index 000000000000..a831fee32399
--- /dev/null
+++ b/drivers/iommu/fsl_pamu.c
@@ -0,0 +1,1309 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
20
21#include <linux/init.h>
22#include <linux/iommu.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h>
28#include <linux/device.h>
29#include <linux/of_platform.h>
30#include <linux/bootmem.h>
31#include <linux/genalloc.h>
32#include <asm/io.h>
33#include <asm/bitops.h>
34#include <asm/fsl_guts.h>
35
36#include "fsl_pamu.h"
37
38/* define indexes for each operation mapping scenario */
39#define OMI_QMAN 0x00
40#define OMI_FMAN 0x01
41#define OMI_QMAN_PRIV 0x02
42#define OMI_CAAM 0x03
43
44#define make64(high, low) (((u64)(high) << 32) | (low))
45
/*
 * Context passed to the access-violation ISR (pamu_av_isr): the base of
 * the mapped PAMU register window plus the number of PAMU instances it
 * covers (each at a PAMU_OFFSET stride).
 */
struct pamu_isr_data {
	void __iomem *pamu_reg_base;	/* Base address of PAMU regs */
	unsigned int count;		/* The number of PAMUs */
};
50
51static struct paace *ppaact;
52static struct paace *spaact;
53static struct ome *omt;
54
55/*
56 * Table for matching compatible strings, for device tree
57 * guts node, for QorIQ SOCs.
58 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
59 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
60 * string would be used.
61*/
62static const struct of_device_id guts_device_ids[] = {
63 { .compatible = "fsl,qoriq-device-config-1.0", },
64 { .compatible = "fsl,qoriq-device-config-2.0", },
65 {}
66};
67
68
69/*
70 * Table for matching compatible strings, for device tree
71 * L3 cache controller node.
72 * "fsl,t4240-l3-cache-controller" corresponds to T4,
73 * "fsl,b4860-l3-cache-controller" corresponds to B4 &
74 * "fsl,p4080-l3-cache-controller" corresponds to other,
75 * SOCs.
76*/
77static const struct of_device_id l3_device_ids[] = {
78 { .compatible = "fsl,t4240-l3-cache-controller", },
79 { .compatible = "fsl,b4860-l3-cache-controller", },
80 { .compatible = "fsl,p4080-l3-cache-controller", },
81 {}
82};
83
84/* maximum subwindows permitted per liodn */
85static u32 max_subwindow_count;
86
87/* Pool for fspi allocation */
88struct gen_pool *spaace_pool;
89
90/**
91 * pamu_get_max_subwin_cnt() - Return the maximum supported
92 * subwindow count per liodn.
93 *
94 */
95u32 pamu_get_max_subwin_cnt()
96{
97 return max_subwindow_count;
98}
99
100/**
101 * pamu_get_ppaace() - Return the primary PACCE
102 * @liodn: liodn PAACT index for desired PAACE
103 *
104 * Returns the ppace pointer upon success else return
105 * null.
106 */
107static struct paace *pamu_get_ppaace(int liodn)
108{
109 if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
110 pr_debug("PPAACT doesn't exist\n");
111 return NULL;
112 }
113
114 return &ppaact[liodn];
115}
116
117/**
118 * pamu_enable_liodn() - Set valid bit of PACCE
119 * @liodn: liodn PAACT index for desired PAACE
120 *
121 * Returns 0 upon success else error code < 0 returned
122 */
123int pamu_enable_liodn(int liodn)
124{
125 struct paace *ppaace;
126
127 ppaace = pamu_get_ppaace(liodn);
128 if (!ppaace) {
129 pr_debug("Invalid primary paace entry\n");
130 return -ENOENT;
131 }
132
133 if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
134 pr_debug("liodn %d not configured\n", liodn);
135 return -EINVAL;
136 }
137
138 /* Ensure that all other stores to the ppaace complete first */
139 mb();
140
141 set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
142 mb();
143
144 return 0;
145}
146
147/**
148 * pamu_disable_liodn() - Clears valid bit of PACCE
149 * @liodn: liodn PAACT index for desired PAACE
150 *
151 * Returns 0 upon success else error code < 0 returned
152 */
153int pamu_disable_liodn(int liodn)
154{
155 struct paace *ppaace;
156
157 ppaace = pamu_get_ppaace(liodn);
158 if (!ppaace) {
159 pr_debug("Invalid primary paace entry\n");
160 return -ENOENT;
161 }
162
163 set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
164 mb();
165
166 return 0;
167}
168
/* Derive the window size encoding for a particular PAACE entry */
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
	/* Bug if not a power of 2 */
	BUG_ON(!is_power_of_2(addrspace_size));

	/* window size is 2^(WSE+1) bytes, so WSE = log2(size) - 1 */
	return __ffs(addrspace_size) - 1;
}
178
/* Derive the PAACE window count encoding for the subwindow count */
static unsigned int map_subwindow_cnt_to_wce(u32 subwin_cnt)
{
	/* window count is 2^(WCE+1) subwindows, so WCE = log2(count) - 1;
	 * caller is expected to pass a power-of-two count */
	return __ffs(subwin_cnt) - 1;
}
185
/*
 * Set the PAACE type as primary and set the coherency required domain
 * attribute, the common initialisation shared by every PPAACE entry.
 */
static void pamu_init_ppaace(struct paace *ppaace)
{
	set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);

	/* default to coherent accesses; cleared later for QMAN/BMAN
	 * private memory by setup_qbman_paace() */
	set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
	       PAACE_M_COHERENCE_REQ);
}
197
/*
 * Set the PAACE type as secondary and set the coherency required domain
 * attribute.
 */
static void pamu_init_spaace(struct paace *spaace)
{
	set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
	set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
	       PAACE_M_COHERENCE_REQ);
}
208
209/*
210 * Return the spaace (corresponding to the secondary window index)
211 * for a particular ppaace.
212 */
213static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
214{
215 u32 subwin_cnt;
216 struct paace *spaace = NULL;
217
218 subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
219
220 if (wnum < subwin_cnt)
221 spaace = &spaact[paace->fspi + wnum];
222 else
223 pr_debug("secondary paace out of bounds\n");
224
225 return spaace;
226}
227
228/**
229 * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
230 * required for primary PAACE in the secondary
231 * PAACE table.
232 * @subwin_cnt: Number of subwindows to be reserved.
233 *
234 * A PPAACE entry may have a number of associated subwindows. A subwindow
235 * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
236 * the index (fspi) of the first SPAACE entry in the SPAACT table. This
237 * function returns the index of the first SPAACE entry. The remaining
238 * SPAACE entries are reserved contiguously from that index.
239 *
240 * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
241 * If no SPAACE entry is available or the allocator can not reserve the required
242 * number of contiguous entries function returns ULONG_MAX indicating a failure.
243 *
244*/
245static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
246{
247 unsigned long spaace_addr;
248
249 spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
250 if (!spaace_addr)
251 return ULONG_MAX;
252
253 return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
254}
255
/* Release the subwindows reserved for a particular LIODN */
void pamu_free_subwins(int liodn)
{
	struct paace *ppaace;
	u32 subwin_cnt, size;

	ppaace = pamu_get_ppaace(liodn);
	if (!ppaace) {
		pr_debug("Invalid liodn entry\n");
		return;
	}

	/* Only multi-window (MW) entries own SPAACT allocations. */
	if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
		/* subwindow count encoding: 2^(WCE+1) */
		subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
		/* the first subwindow lives in the PPAACE itself, so only
		 * subwin_cnt - 1 SPAACT entries were allocated (matches
		 * pamu_config_ppaace()) */
		size = (subwin_cnt - 1) * sizeof(struct paace);
		gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
		set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
	}
}
275
276/*
277 * Function used for updating stash destination for the coressponding
278 * LIODN.
279 */
280int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
281{
282 struct paace *paace;
283
284 paace = pamu_get_ppaace(liodn);
285 if (!paace) {
286 pr_debug("Invalid liodn entry\n");
287 return -ENOENT;
288 }
289 if (subwin) {
290 paace = pamu_get_spaace(paace, subwin - 1);
291 if (!paace) {
292 return -ENOENT;
293 }
294 }
295 set_bf(paace->impl_attr, PAACE_IA_CID, value);
296
297 mb();
298
299 return 0;
300}
301
302/* Disable a subwindow corresponding to the LIODN */
303int pamu_disable_spaace(int liodn, u32 subwin)
304{
305 struct paace *paace;
306
307 paace = pamu_get_ppaace(liodn);
308 if (!paace) {
309 pr_debug("Invalid liodn entry\n");
310 return -ENOENT;
311 }
312 if (subwin) {
313 paace = pamu_get_spaace(paace, subwin - 1);
314 if (!paace) {
315 return -ENOENT;
316 }
317 set_bf(paace->addr_bitfields, PAACE_AF_V,
318 PAACE_V_INVALID);
319 } else {
320 set_bf(paace->addr_bitfields, PAACE_AF_AP,
321 PAACE_AP_PERMS_DENIED);
322 }
323
324 mb();
325
326 return 0;
327}
328
329
/**
 * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
 *
 * @liodn: Logical IO device number
 * @win_addr: starting address of DSA window
 * @win_size: size of DSA window
 * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
 * @rpn: real (true physical) page number
 * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
 *	     snoopid not defined
 * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
 *	     stashid not defined
 * @subwin_cnt: number of sub-windows
 * @prot: window permissions
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
		       u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
		       u32 subwin_cnt, int prot)
{
	struct paace *ppaace;
	unsigned long fspi;

	/* the hardware only supports power-of-two windows >= one page */
	if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
		pr_debug("window size too small or not a power of two %llx\n", win_size);
		return -EINVAL;
	}

	if (win_addr & (win_size - 1)) {
		pr_debug("window address is not aligned with window size\n");
		return -EINVAL;
	}

	ppaace = pamu_get_ppaace(liodn);
	if (!ppaace) {
		return -ENOENT;
	}

	/* window size is 2^(WSE+1) bytes */
	set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
	       map_addrspace_size_to_wse(win_size));

	pamu_init_ppaace(ppaace);

	/* window base address is split: high bits in wbah, low in WBAL */
	ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
	set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
	       (win_addr >> PAMU_PAGE_SHIFT));

	/* set up operation mapping if it's configured */
	if (omi < OME_NUMBER_ENTRIES) {
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = omi;
	} else if (~omi != 0) {
		pr_debug("bad operation mapping index: %d\n", omi);
		return -EINVAL;
	}

	/* configure stash id */
	if (~stashid != 0)
		set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);

	/* configure snoop id */
	if (~snoopid != 0)
		ppaace->domain_attr.to_host.snpid = snoopid;

	if (subwin_cnt) {
		/* The first entry is in the primary PAACE instead */
		fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
		if (fspi == ULONG_MAX) {
			pr_debug("spaace indexes exhausted\n");
			return -EINVAL;
		}

		/* window count is 2^(WCE+1) subwindows */
		set_bf(ppaace->impl_attr, PAACE_IA_WCE,
		       map_subwindow_cnt_to_wce(subwin_cnt));
		set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
		ppaace->fspi = fspi;
	} else {
		/* single-window case: translate directly via rpn */
		set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
		ppaace->twbah = rpn >> 20;
		set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
		set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
		set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
		set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
	}
	mb();

	return 0;
}
421
/**
 * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
 *
 * @liodn: Logical IO device number
 * @subwin_cnt: number of sub-windows associated with dma-window
 * @subwin: subwindow index
 * @subwin_size: size of subwindow
 * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
 * @rpn: real (true physical) page number
 * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
 *	     snoopid not defined
 * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
 *	     stashid not defined
 * @enable: enable/disable subwindow after reconfiguration
 * @prot: sub window permissions
 *
 * Returns 0 upon success else error code < 0 returned
 */
int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
		       phys_addr_t subwin_size, u32 omi, unsigned long rpn,
		       u32 snoopid, u32 stashid, int enable, int prot)
{
	struct paace *paace;

	/* setup sub-windows */
	if (!subwin_cnt) {
		pr_debug("Invalid subwindow count\n");
		return -EINVAL;
	}

	/* subwindow 0 is the primary PAACE itself; subwindows 1..cnt-1
	 * live in the SPAACT, offset by one */
	paace = pamu_get_ppaace(liodn);
	if (subwin > 0 && subwin < subwin_cnt && paace) {
		paace = pamu_get_spaace(paace, subwin - 1);

		/* first-time setup: initialize type/LIODN while the entry
		 * is still marked invalid */
		if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
			pamu_init_spaace(paace);
			set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
		}
	}

	if (!paace) {
		pr_debug("Invalid liodn entry\n");
		return -ENOENT;
	}

	if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
		pr_debug("subwindow size out of range, or not a power of 2\n");
		return -EINVAL;
	}

	if (rpn == ULONG_MAX) {
		pr_debug("real page number out of range\n");
		return -EINVAL;
	}

	/* window size is 2^(WSE+1) bytes */
	set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
	       map_addrspace_size_to_wse(subwin_size));

	/* translated base address split: high bits in twbah, low in TWBAL */
	set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
	paace->twbah = rpn >> 20;
	set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
	set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);

	/* configure snoop id */
	if (~snoopid != 0)
		paace->domain_attr.to_host.snpid = snoopid;

	/* set up operation mapping if it's configured */
	if (omi < OME_NUMBER_ENTRIES) {
		set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		paace->op_encode.index_ot.omi = omi;
	} else if (~omi != 0) {
		pr_debug("bad operation mapping index: %d\n", omi);
		return -EINVAL;
	}

	/* configure stash id */
	if (~stashid != 0)
		set_bf(paace->impl_attr, PAACE_IA_CID, stashid);

	/* publish all fields before (possibly) marking the entry valid */
	smp_wmb();

	if (enable)
		set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);

	mb();

	return 0;
}
511
512/**
513* get_ome_index() - Returns the index in the operation mapping table
514* for device.
515* @*omi_index: pointer for storing the index value
516*
517*/
518void get_ome_index(u32 *omi_index, struct device *dev)
519{
520 if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
521 *omi_index = OMI_QMAN;
522 if (of_device_is_compatible(dev->of_node, "fsl,qman"))
523 *omi_index = OMI_QMAN_PRIV;
524}
525
526/**
527 * get_stash_id - Returns stash destination id corresponding to a
528 * cache type and vcpu.
529 * @stash_dest_hint: L1, L2 or L3
530 * @vcpu: vpcu target for a particular cache type.
531 *
532 * Returs stash on success or ~(u32)0 on failure.
533 *
534 */
535u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
536{
537 const u32 *prop;
538 struct device_node *node;
539 u32 cache_level;
540 int len, found = 0;
541 int i;
542
543 /* Fastpath, exit early if L3/CPC cache is target for stashing */
544 if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
545 node = of_find_matching_node(NULL, l3_device_ids);
546 if (node) {
547 prop = of_get_property(node, "cache-stash-id", 0);
548 if (!prop) {
549 pr_debug("missing cache-stash-id at %s\n", node->full_name);
550 of_node_put(node);
551 return ~(u32)0;
552 }
553 of_node_put(node);
554 return be32_to_cpup(prop);
555 }
556 return ~(u32)0;
557 }
558
559 for_each_node_by_type(node, "cpu") {
560 prop = of_get_property(node, "reg", &len);
561 for (i = 0; i < len / sizeof(u32); i++) {
562 if (be32_to_cpup(&prop[i]) == vcpu) {
563 found = 1;
564 goto found_cpu_node;
565 }
566 }
567 }
568found_cpu_node:
569
570 /* find the hwnode that represents the cache */
571 for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
572 if (stash_dest_hint == cache_level) {
573 prop = of_get_property(node, "cache-stash-id", 0);
574 if (!prop) {
575 pr_debug("missing cache-stash-id at %s\n", node->full_name);
576 of_node_put(node);
577 return ~(u32)0;
578 }
579 of_node_put(node);
580 return be32_to_cpup(prop);
581 }
582
583 prop = of_get_property(node, "next-level-cache", 0);
584 if (!prop) {
585 pr_debug("can't find next-level-cache at %s\n",
586 node->full_name);
587 of_node_put(node);
588 return ~(u32)0; /* can't traverse any further */
589 }
590 of_node_put(node);
591
592 /* advance to next node in cache hierarchy */
593 node = of_find_node_by_phandle(*prop);
594 if (!node) {
595 pr_debug("Invalid node for cache hierarchy %s\n",
596 node->full_name);
597 return ~(u32)0;
598 }
599 }
600
601 pr_debug("stash dest not found for %d on vcpu %d\n",
602 stash_dest_hint, vcpu);
603 return ~(u32)0;
604}
605
606/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
607#define QMAN_PAACE 1
608#define QMAN_PORTAL_PAACE 2
609#define BMAN_PAACE 3
610
/**
 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 * clear the PAACE entry coherency attribute for them.
 */
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
	switch (paace_type) {
	case QMAN_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
		/* setup QMAN Private data stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		/* private-memory accesses need not be coherent */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	case QMAN_PORTAL_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN;
		/* Set DQRR and Frame stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		break;
	case BMAN_PAACE:
		/* BMAN private-memory accesses need not be coherent */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	}
}
639
/**
 * Setup the operation mapping table for various devices. This is a static
 * table where each table index corresponds to a particular device. PAMU uses
 * this table to translate device transaction to appropriate corenet
 * transaction.
 */
static void __init setup_omt(struct ome *omt)
{
	struct ome *ome;

	/* Configure OMI_QMAN */
	ome = &omt[OMI_QMAN];

	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
	ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
	ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;

	/* direct-access operations get logical-I/O decode encodings */
	ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
	ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;

	/* Configure OMI_FMAN */
	ome = &omt[OMI_FMAN];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;

	/* Configure OMI_QMAN private */
	ome = &omt[OMI_QMAN_PRIV];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
	ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
	ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;

	/* Configure OMI_CAAM */
	ome = &omt[OMI_CAAM];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
}
678
679/*
680 * Get the maximum number of PAACT table entries
681 * and subwindows supported by PAMU
682 */
683static void get_pamu_cap_values(unsigned long pamu_reg_base)
684{
685 u32 pc_val;
686
687 pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
688 /* Maximum number of subwindows per liodn */
689 max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
690}
691
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
		   phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
		   phys_addr_t omt_phys)
{
	/* NOTE(review): pamu_reg_size is currently unused in this body */
	u32 *pc;
	struct pamu_mmap_regs *pamu_regs;

	pc = (u32 *) (pamu_reg_base + PAMU_PC);
	pamu_regs = (struct pamu_mmap_regs *)
		(pamu_reg_base + PAMU_MMAP_REGS_BASE);

	/* set up pointers to corenet control blocks */

	/* Each table is programmed as a base/limit pair of 32-bit halves;
	 * the limit is the physical address just past the table. */
	out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
	ppaact_phys = ppaact_phys + PAACT_SIZE;
	out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));

	out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
	spaact_phys = spaact_phys + SPAACT_SIZE;
	out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));

	out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
	omt_phys = omt_phys + OMT_SIZE;
	out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));

	/*
	 * set PAMU enable bit,
	 * allow ppaact & omt to be cached
	 * & enable PAMU access violation interrupts.
	 */

	out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
		 PAMU_ACCESS_VIOLATION_ENABLE);
	out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
	return 0;
}
735
/* Enable all device LIODNS */
static void __init setup_liodns(void)
{
	int i, len;
	struct paace *ppaace;
	struct device_node *node = NULL;
	const u32 *prop;

	/*
	 * Walk every device-tree node carrying an "fsl,liodn" property and
	 * give each listed LIODN an untranslated, all-permissions window.
	 */
	for_each_node_with_property(node, "fsl,liodn") {
		prop = of_get_property(node, "fsl,liodn", &len);
		for (i = 0; i < len / sizeof(u32); i++) {
			int liodn;

			liodn = be32_to_cpup(&prop[i]);
			if (liodn >= PAACE_NUMBER_ENTRIES) {
				pr_debug("Invalid LIODN value %d\n", liodn);
				continue;
			}
			/* NOTE(review): ppaace is not NULL-checked here;
			 * assumes the PPAACT was allocated before this runs */
			ppaace = pamu_get_ppaace(liodn);
			pamu_init_ppaace(ppaace);
			/* window size is 2^(WSE+1) bytes; WSE=35 -> 2^36 */
			set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
			ppaace->wbah = 0;
			set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
			set_bf(ppaace->impl_attr, PAACE_IA_ATM,
			       PAACE_ATM_NO_XLATE);
			set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
			       PAACE_AP_PERMS_ALL);
			/* QMAN/BMAN nodes need stashing/coherency tweaks */
			if (of_device_is_compatible(node, "fsl,qman-portal"))
				setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
			if (of_device_is_compatible(node, "fsl,qman"))
				setup_qbman_paace(ppaace, QMAN_PAACE);
			if (of_device_is_compatible(node, "fsl,bman"))
				setup_qbman_paace(ppaace, BMAN_PAACE);
			mb();
			pamu_enable_liodn(liodn);
		}
	}
}
775
/*
 * PAMU access-violation interrupt handler.
 * @irq: interrupt number (unused)
 * @arg: the struct pamu_isr_data registered with the interrupt
 *
 * Dumps the violation capture registers for every PAMU instance that
 * reports a violation, then either disables the offending LIODN or
 * (erratum A-003638) disables violation reporting entirely.
 */
irqreturn_t pamu_av_isr(int irq, void *arg)
{
	struct pamu_isr_data *data = arg;
	phys_addr_t phys;
	unsigned int i, j, ret;

	pr_emerg("fsl-pamu: access violation interrupt\n");

	/* scan every PAMU instance covered by this register window */
	for (i = 0; i < data->count; i++) {
		void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
		u32 pics = in_be32(p + PAMU_PICS);

		if (pics & PAMU_ACCESS_VIOLATION_STAT) {
			u32 avs1 = in_be32(p + PAMU_AVS1);
			struct paace *paace;

			/* dump the violation capture registers */
			pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
			pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
			pr_emerg("AVS1=%08x\n", avs1);
			pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
			pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
							 in_be32(p + PAMU_AVAL)));
			pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
			pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
							  in_be32(p + PAMU_POEAL)));

			phys = make64(in_be32(p + PAMU_POEAH),
				      in_be32(p + PAMU_POEAL));

			/* Assume that POEA points to a PAACE */
			if (phys) {
				u32 *paace = phys_to_virt(phys);

				/* Only the first four words are relevant */
				for (j = 0; j < 4; j++)
					pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
			}

			/* clear access violation condition */
			out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
			paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
			BUG_ON(!paace);
			/* check if we got a violation for a disabled LIODN */
			if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
				/*
				 * As per hardware erratum A-003638, access
				 * violation can be reported for a disabled
				 * LIODN. If we hit that condition, disable
				 * access violation reporting.
				 */
				pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
			} else {
				/* Disable the LIODN */
				ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
				BUG_ON(ret);
				pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
			}
			out_be32((p + PAMU_PICS), pics);
		}
	}

	return IRQ_HANDLED;
}
840
841#define LAWAR_EN 0x80000000
842#define LAWAR_TARGET_MASK 0x0FF00000
843#define LAWAR_TARGET_SHIFT 20
844#define LAWAR_SIZE_MASK 0x0000003F
845#define LAWAR_CSDID_MASK 0x000FF000
846#define LAWAR_CSDID_SHIFT 12
847
848#define LAW_SIZE_4K 0xb
849
/* Memory-mapped layout of one Local Access Window (LAW) register set. */
struct ccsr_law {
	u32 lawbarh;	/* LAWn base address high */
	u32 lawbarl;	/* LAWn base address low */
	u32 lawar;	/* LAWn attributes */
	u32 reserved;
};
856
857/*
858 * Create a coherence subdomain for a given memory block.
859 */
860static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
861{
862 struct device_node *np;
863 const __be32 *iprop;
864 void __iomem *lac = NULL; /* Local Access Control registers */
865 struct ccsr_law __iomem *law;
866 void __iomem *ccm = NULL;
867 u32 __iomem *csdids;
868 unsigned int i, num_laws, num_csds;
869 u32 law_target = 0;
870 u32 csd_id = 0;
871 int ret = 0;
872
873 np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
874 if (!np)
875 return -ENODEV;
876
877 iprop = of_get_property(np, "fsl,num-laws", NULL);
878 if (!iprop) {
879 ret = -ENODEV;
880 goto error;
881 }
882
883 num_laws = be32_to_cpup(iprop);
884 if (!num_laws) {
885 ret = -ENODEV;
886 goto error;
887 }
888
889 lac = of_iomap(np, 0);
890 if (!lac) {
891 ret = -ENODEV;
892 goto error;
893 }
894
895 /* LAW registers are at offset 0xC00 */
896 law = lac + 0xC00;
897
898 of_node_put(np);
899
900 np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
901 if (!np) {
902 ret = -ENODEV;
903 goto error;
904 }
905
906 iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
907 if (!iprop) {
908 ret = -ENODEV;
909 goto error;
910 }
911
912 num_csds = be32_to_cpup(iprop);
913 if (!num_csds) {
914 ret = -ENODEV;
915 goto error;
916 }
917
918 ccm = of_iomap(np, 0);
919 if (!ccm) {
920 ret = -ENOMEM;
921 goto error;
922 }
923
924 /* The undocumented CSDID registers are at offset 0x600 */
925 csdids = ccm + 0x600;
926
927 of_node_put(np);
928 np = NULL;
929
930 /* Find an unused coherence subdomain ID */
931 for (csd_id = 0; csd_id < num_csds; csd_id++) {
932 if (!csdids[csd_id])
933 break;
934 }
935
936 /* Store the Port ID in the (undocumented) proper CIDMRxx register */
937 csdids[csd_id] = csd_port_id;
938
939 /* Find the DDR LAW that maps to our buffer. */
940 for (i = 0; i < num_laws; i++) {
941 if (law[i].lawar & LAWAR_EN) {
942 phys_addr_t law_start, law_end;
943
944 law_start = make64(law[i].lawbarh, law[i].lawbarl);
945 law_end = law_start +
946 (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
947
948 if (law_start <= phys && phys < law_end) {
949 law_target = law[i].lawar & LAWAR_TARGET_MASK;
950 break;
951 }
952 }
953 }
954
955 if (i == 0 || i == num_laws) {
956 /* This should never happen*/
957 ret = -ENOENT;
958 goto error;
959 }
960
961 /* Find a free LAW entry */
962 while (law[--i].lawar & LAWAR_EN) {
963 if (i == 0) {
964 /* No higher priority LAW slots available */
965 ret = -ENOENT;
966 goto error;
967 }
968 }
969
970 law[i].lawbarh = upper_32_bits(phys);
971 law[i].lawbarl = lower_32_bits(phys);
972 wmb();
973 law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
974 (LAW_SIZE_4K + get_order(size));
975 wmb();
976
977error:
978 if (ccm)
979 iounmap(ccm);
980
981 if (lac)
982 iounmap(lac);
983
984 if (np)
985 of_node_put(np);
986
987 return ret;
988}
989
990/*
991 * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
992 * bit map of snoopers for a given range of memory mapped by a LAW.
993 *
 * All future CoreNet-enabled SoCs will have this erratum (A-004510) fixed, so this
995 * table should never need to be updated. SVRs are guaranteed to be unique, so
996 * there is no worry that a future SOC will inadvertently have one of these
997 * values.
998 */
static const struct {
    u32 svr;		/* SVR value with the security (E) bit masked off */
    u32 port_id;	/* CSD Port ID: bit map of snoopers for the LAW-mapped range */
} port_id_map[] = {
    {0x82100010, 0xFF000000}, /* P2040 1.0 */
    {0x82100011, 0xFF000000}, /* P2040 1.1 */
    {0x82100110, 0xFF000000}, /* P2041 1.0 */
    {0x82100111, 0xFF000000}, /* P2041 1.1 */
    {0x82110310, 0xFF000000}, /* P3041 1.0 */
    {0x82110311, 0xFF000000}, /* P3041 1.1 */
    {0x82010020, 0xFFF80000}, /* P4040 2.0 */
    {0x82000020, 0xFFF80000}, /* P4080 2.0 */
    {0x82210010, 0xFC000000}, /* P5010 1.0 */
    {0x82210020, 0xFC000000}, /* P5010 2.0 */
    {0x82200010, 0xFC000000}, /* P5020 1.0 */
    {0x82050010, 0xFF800000}, /* P5021 1.0 */
    {0x82040010, 0xFF800000}, /* P5040 1.0 */
};
1017
1018#define SVR_SECURITY 0x80000 /* The Security (E) bit */
1019
1020static int __init fsl_pamu_probe(struct platform_device *pdev)
1021{
1022 void __iomem *pamu_regs = NULL;
1023 struct ccsr_guts __iomem *guts_regs = NULL;
1024 u32 pamubypenr, pamu_counter;
1025 unsigned long pamu_reg_off;
1026 unsigned long pamu_reg_base;
1027 struct pamu_isr_data *data = NULL;
1028 struct device_node *guts_node;
1029 u64 size;
1030 struct page *p;
1031 int ret = 0;
1032 int irq;
1033 phys_addr_t ppaact_phys;
1034 phys_addr_t spaact_phys;
1035 phys_addr_t omt_phys;
1036 size_t mem_size = 0;
1037 unsigned int order = 0;
1038 u32 csd_port_id = 0;
1039 unsigned i;
1040 /*
1041 * enumerate all PAMUs and allocate and setup PAMU tables
1042 * for each of them,
1043 * NOTE : All PAMUs share the same LIODN tables.
1044 */
1045
1046 pamu_regs = of_iomap(pdev->dev.of_node, 0);
1047 if (!pamu_regs) {
1048 dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
1049 return -ENOMEM;
1050 }
1051 of_get_address(pdev->dev.of_node, 0, &size, NULL);
1052
1053 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1054 if (irq == NO_IRQ) {
1055 dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
1056 goto error;
1057 }
1058
1059 data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
1060 if (!data) {
1061 dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
1062 ret = -ENOMEM;
1063 goto error;
1064 }
1065 data->pamu_reg_base = pamu_regs;
1066 data->count = size / PAMU_OFFSET;
1067
1068 /* The ISR needs access to the regs, so we won't iounmap them */
1069 ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
1070 if (ret < 0) {
1071 dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
1072 ret, irq);
1073 goto error;
1074 }
1075
1076 guts_node = of_find_matching_node(NULL, guts_device_ids);
1077 if (!guts_node) {
1078 dev_err(&pdev->dev, "could not find GUTS node %s\n",
1079 pdev->dev.of_node->full_name);
1080 ret = -ENODEV;
1081 goto error;
1082 }
1083
1084 guts_regs = of_iomap(guts_node, 0);
1085 of_node_put(guts_node);
1086 if (!guts_regs) {
1087 dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
1088 ret = -ENODEV;
1089 goto error;
1090 }
1091
1092 /* read in the PAMU capability registers */
1093 get_pamu_cap_values((unsigned long)pamu_regs);
1094 /*
1095 * To simplify the allocation of a coherency domain, we allocate the
1096 * PAACT and the OMT in the same memory buffer. Unfortunately, this
1097 * wastes more memory compared to allocating the buffers separately.
1098 */
1099 /* Determine how much memory we need */
1100 mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
1101 (PAGE_SIZE << get_order(SPAACT_SIZE)) +
1102 (PAGE_SIZE << get_order(OMT_SIZE));
1103 order = get_order(mem_size);
1104
1105 p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1106 if (!p) {
1107 dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
1108 ret = -ENOMEM;
1109 goto error;
1110 }
1111
1112 ppaact = page_address(p);
1113 ppaact_phys = page_to_phys(p);
1114
1115 /* Make sure the memory is naturally aligned */
1116 if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
1117 dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
1118 ret = -ENOMEM;
1119 goto error;
1120 }
1121
1122 spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
1123 omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
1124
1125 dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
1126 (unsigned long long) ppaact_phys);
1127
1128 /* Check to see if we need to implement the work-around on this SOC */
1129
1130 /* Determine the Port ID for our coherence subdomain */
1131 for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
1132 if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
1133 csd_port_id = port_id_map[i].port_id;
1134 dev_dbg(&pdev->dev, "found matching SVR %08x\n",
1135 port_id_map[i].svr);
1136 break;
1137 }
1138 }
1139
1140 if (csd_port_id) {
1141 dev_dbg(&pdev->dev, "creating coherency subdomain at address "
1142 "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
1143 mem_size, csd_port_id);
1144
1145 ret = create_csd(ppaact_phys, mem_size, csd_port_id);
1146 if (ret) {
1147 dev_err(&pdev->dev, "could not create coherence "
1148 "subdomain\n");
1149 return ret;
1150 }
1151 }
1152
1153 spaact_phys = virt_to_phys(spaact);
1154 omt_phys = virt_to_phys(omt);
1155
1156 spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
1157 if (!spaace_pool) {
1158 ret = -ENOMEM;
1159 dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
1160 goto error;
1161 }
1162
1163 ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
1164 if (ret)
1165 goto error_genpool;
1166
1167 pamubypenr = in_be32(&guts_regs->pamubypenr);
1168
1169 for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
1170 pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
1171
1172 pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
1173 setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
1174 spaact_phys, omt_phys);
1175 /* Disable PAMU bypass for this PAMU */
1176 pamubypenr &= ~pamu_counter;
1177 }
1178
1179 setup_omt(omt);
1180
1181 /* Enable all relevant PAMU(s) */
1182 out_be32(&guts_regs->pamubypenr, pamubypenr);
1183
1184 iounmap(guts_regs);
1185
1186 /* Enable DMA for the LIODNs in the device tree*/
1187
1188 setup_liodns();
1189
1190 return 0;
1191
1192error_genpool:
1193 gen_pool_destroy(spaace_pool);
1194
1195error:
1196 if (irq != NO_IRQ)
1197 free_irq(irq, data);
1198
1199 if (data) {
1200 memset(data, 0, sizeof(struct pamu_isr_data));
1201 kfree(data);
1202 }
1203
1204 if (pamu_regs)
1205 iounmap(pamu_regs);
1206
1207 if (guts_regs)
1208 iounmap(guts_regs);
1209
1210 if (ppaact)
1211 free_pages((unsigned long)ppaact, order);
1212
1213 ppaact = NULL;
1214
1215 return ret;
1216}
1217
/* Device-tree compatibles handled by this driver */
static const struct of_device_id fsl_of_pamu_ids[] = {
    {
        .compatible = "fsl,p4080-pamu",
    },
    {
        .compatible = "fsl,pamu",
    },
    {},	/* sentinel */
};
1227
/*
 * Platform driver. Note there is no of_match_table: the platform
 * device is created manually in fsl_pamu_init() rather than matched
 * through the normal OF machinery.
 */
static struct platform_driver fsl_of_pamu_driver = {
    .driver = {
        .name = "fsl-of-pamu",
        .owner = THIS_MODULE,
    },
    .probe = fsl_pamu_probe,
};
1235
1236static __init int fsl_pamu_init(void)
1237{
1238 struct platform_device *pdev = NULL;
1239 struct device_node *np;
1240 int ret;
1241
1242 /*
1243 * The normal OF process calls the probe function at some
1244 * indeterminate later time, after most drivers have loaded. This is
1245 * too late for us, because PAMU clients (like the Qman driver)
1246 * depend on PAMU being initialized early.
1247 *
1248 * So instead, we "manually" call our probe function by creating the
1249 * platform devices ourselves.
1250 */
1251
1252 /*
1253 * We assume that there is only one PAMU node in the device tree. A
1254 * single PAMU node represents all of the PAMU devices in the SOC
1255 * already. Everything else already makes that assumption, and the
1256 * binding for the PAMU nodes doesn't allow for any parent-child
1257 * relationships anyway. In other words, support for more than one
1258 * PAMU node would require significant changes to a lot of code.
1259 */
1260
1261 np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
1262 if (!np) {
1263 pr_err("fsl-pamu: could not find a PAMU node\n");
1264 return -ENODEV;
1265 }
1266
1267 ret = platform_driver_register(&fsl_of_pamu_driver);
1268 if (ret) {
1269 pr_err("fsl-pamu: could not register driver (err=%i)\n", ret);
1270 goto error_driver_register;
1271 }
1272
1273 pdev = platform_device_alloc("fsl-of-pamu", 0);
1274 if (!pdev) {
1275 pr_err("fsl-pamu: could not allocate device %s\n",
1276 np->full_name);
1277 ret = -ENOMEM;
1278 goto error_device_alloc;
1279 }
1280 pdev->dev.of_node = of_node_get(np);
1281
1282 ret = pamu_domain_init();
1283 if (ret)
1284 goto error_device_add;
1285
1286 ret = platform_device_add(pdev);
1287 if (ret) {
1288 pr_err("fsl-pamu: could not add device %s (err=%i)\n",
1289 np->full_name, ret);
1290 goto error_device_add;
1291 }
1292
1293 return 0;
1294
1295error_device_add:
1296 of_node_put(pdev->dev.of_node);
1297 pdev->dev.of_node = NULL;
1298
1299 platform_device_put(pdev);
1300
1301error_device_alloc:
1302 platform_driver_unregister(&fsl_of_pamu_driver);
1303
1304error_driver_register:
1305 of_node_put(np);
1306
1307 return ret;
1308}
1309arch_initcall(fsl_pamu_init);
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
new file mode 100644
index 000000000000..8fc1a125b16e
--- /dev/null
+++ b/drivers/iommu/fsl_pamu.h
@@ -0,0 +1,410 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#ifndef __FSL_PAMU_H
20#define __FSL_PAMU_H
21
22#include <asm/fsl_pamu_stash.h>
23
24/* Bit Field macros
25 * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
26 */
27#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
28#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
29
30/* PAMU CCSR space */
31#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
32#define PAMU_PE 0x40000000 /* enable PAMU */
33
34/* PAMU_OFFSET to the next pamu space in ccsr */
35#define PAMU_OFFSET 0x1000
36
37#define PAMU_MMAP_REGS_BASE 0
38
/*
 * PAMU memory-map registers, laid out at PAMU_MMAP_REGS_BASE.
 * NOTE(review): field meanings inferred from the register names
 * (p* = primary PAACT, s* = secondary PAACT, o* = OMT; *bah/*bal =
 * base address high/low, *lah/*lal = last/limit address high/low) —
 * confirm against the QorIQ PAMU reference manual.
 */
struct pamu_mmap_regs {
    u32 ppbah;	/* primary PAACT base address, high */
    u32 ppbal;	/* primary PAACT base address, low */
    u32 pplah;	/* primary PAACT last address, high */
    u32 pplal;	/* primary PAACT last address, low */
    u32 spbah;	/* secondary PAACT base address, high */
    u32 spbal;	/* secondary PAACT base address, low */
    u32 splah;	/* secondary PAACT last address, high */
    u32 splal;	/* secondary PAACT last address, low */
    u32 obah;	/* OMT base address, high */
    u32 obal;	/* OMT base address, low */
    u32 olah;	/* OMT last address, high */
    u32 olal;	/* OMT last address, low */
};
53
54/* PAMU Error Registers */
55#define PAMU_POES1 0x0040
56#define PAMU_POES2 0x0044
57#define PAMU_POEAH 0x0048
58#define PAMU_POEAL 0x004C
59#define PAMU_AVS1 0x0050
60#define PAMU_AVS1_AV 0x1
61#define PAMU_AVS1_OTV 0x6
62#define PAMU_AVS1_APV 0x78
63#define PAMU_AVS1_WAV 0x380
64#define PAMU_AVS1_LAV 0x1c00
65#define PAMU_AVS1_GCV 0x2000
66#define PAMU_AVS1_PDV 0x4000
67#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
68 | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
69#define PAMU_AVS1_LIODN_SHIFT 16
70#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
71
72#define PAMU_AVS2 0x0054
73#define PAMU_AVAH 0x0058
74#define PAMU_AVAL 0x005C
75#define PAMU_EECTL 0x0060
76#define PAMU_EEDIS 0x0064
77#define PAMU_EEINTEN 0x0068
78#define PAMU_EEDET 0x006C
79#define PAMU_EEATTR 0x0070
80#define PAMU_EEAHI 0x0074
81#define PAMU_EEALO 0x0078
82#define PAMU_EEDHI 0X007C
83#define PAMU_EEDLO 0x0080
84#define PAMU_EECC 0x0084
85#define PAMU_UDAD 0x0090
86
87/* PAMU Revision Registers */
88#define PAMU_PR1 0x0BF8
89#define PAMU_PR2 0x0BFC
90
91/* PAMU version mask */
92#define PAMU_PR1_MASK 0xffff
93
94/* PAMU Capabilities Registers */
95#define PAMU_PC1 0x0C00
96#define PAMU_PC2 0x0C04
97#define PAMU_PC3 0x0C08
98#define PAMU_PC4 0x0C0C
99
100/* PAMU Control Register */
101#define PAMU_PC 0x0C10
102
103/* PAMU control defs */
104#define PAMU_CONTROL 0x0C10
105#define PAMU_PC_PGC 0x80000000 /* PAMU gate closed bit */
106#define PAMU_PC_PE 0x40000000 /* PAMU enable bit */
107#define PAMU_PC_SPCC 0x00000010 /* sPAACE cache enable */
108#define PAMU_PC_PPCC 0x00000001 /* pPAACE cache enable */
109#define PAMU_PC_OCE 0x00001000 /* OMT cache enable */
110
111#define PAMU_PFA1 0x0C14
112#define PAMU_PFA2 0x0C18
113
114#define PAMU_PC2_MLIODN(X) ((X) >> 16)
115#define PAMU_PC3_MWCE(X) (((X) >> 21) & 0xf)
116
117/* PAMU Interrupt control and Status Register */
118#define PAMU_PICS 0x0C1C
119#define PAMU_ACCESS_VIOLATION_STAT 0x8
120#define PAMU_ACCESS_VIOLATION_ENABLE 0x4
121
122/* PAMU Debug Registers */
123#define PAMU_PD1 0x0F00
124#define PAMU_PD2 0x0F04
125#define PAMU_PD3 0x0F08
126#define PAMU_PD4 0x0F0C
127
128#define PAACE_AP_PERMS_DENIED 0x0
129#define PAACE_AP_PERMS_QUERY 0x1
130#define PAACE_AP_PERMS_UPDATE 0x2
131#define PAACE_AP_PERMS_ALL 0x3
132
133#define PAACE_DD_TO_HOST 0x0
134#define PAACE_DD_TO_IO 0x1
135#define PAACE_PT_PRIMARY 0x0
136#define PAACE_PT_SECONDARY 0x1
137#define PAACE_V_INVALID 0x0
138#define PAACE_V_VALID 0x1
139#define PAACE_MW_SUBWINDOWS 0x1
140
141#define PAACE_WSE_4K 0xB
142#define PAACE_WSE_8K 0xC
143#define PAACE_WSE_16K 0xD
144#define PAACE_WSE_32K 0xE
145#define PAACE_WSE_64K 0xF
146#define PAACE_WSE_128K 0x10
147#define PAACE_WSE_256K 0x11
148#define PAACE_WSE_512K 0x12
149#define PAACE_WSE_1M 0x13
150#define PAACE_WSE_2M 0x14
151#define PAACE_WSE_4M 0x15
152#define PAACE_WSE_8M 0x16
153#define PAACE_WSE_16M 0x17
154#define PAACE_WSE_32M 0x18
155#define PAACE_WSE_64M 0x19
156#define PAACE_WSE_128M 0x1A
157#define PAACE_WSE_256M 0x1B
158#define PAACE_WSE_512M 0x1C
159#define PAACE_WSE_1G 0x1D
160#define PAACE_WSE_2G 0x1E
161#define PAACE_WSE_4G 0x1F
162
163#define PAACE_DID_PCI_EXPRESS_1 0x00
164#define PAACE_DID_PCI_EXPRESS_2 0x01
165#define PAACE_DID_PCI_EXPRESS_3 0x02
166#define PAACE_DID_PCI_EXPRESS_4 0x03
167#define PAACE_DID_LOCAL_BUS 0x04
168#define PAACE_DID_SRIO 0x0C
169#define PAACE_DID_MEM_1 0x10
170#define PAACE_DID_MEM_2 0x11
171#define PAACE_DID_MEM_3 0x12
172#define PAACE_DID_MEM_4 0x13
173#define PAACE_DID_MEM_1_2 0x14
174#define PAACE_DID_MEM_3_4 0x15
175#define PAACE_DID_MEM_1_4 0x16
176#define PAACE_DID_BM_SW_PORTAL 0x18
177#define PAACE_DID_PAMU 0x1C
178#define PAACE_DID_CAAM 0x21
179#define PAACE_DID_QM_SW_PORTAL 0x3C
180#define PAACE_DID_CORE0_INST 0x80
181#define PAACE_DID_CORE0_DATA 0x81
182#define PAACE_DID_CORE1_INST 0x82
183#define PAACE_DID_CORE1_DATA 0x83
184#define PAACE_DID_CORE2_INST 0x84
185#define PAACE_DID_CORE2_DATA 0x85
186#define PAACE_DID_CORE3_INST 0x86
187#define PAACE_DID_CORE3_DATA 0x87
188#define PAACE_DID_CORE4_INST 0x88
189#define PAACE_DID_CORE4_DATA 0x89
190#define PAACE_DID_CORE5_INST 0x8A
191#define PAACE_DID_CORE5_DATA 0x8B
192#define PAACE_DID_CORE6_INST 0x8C
193#define PAACE_DID_CORE6_DATA 0x8D
194#define PAACE_DID_CORE7_INST 0x8E
195#define PAACE_DID_CORE7_DATA 0x8F
196#define PAACE_DID_BROADCAST 0xFF
197
198#define PAACE_ATM_NO_XLATE 0x00
199#define PAACE_ATM_WINDOW_XLATE 0x01
200#define PAACE_ATM_PAGE_XLATE 0x02
201#define PAACE_ATM_WIN_PG_XLATE \
202 (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
203#define PAACE_OTM_NO_XLATE 0x00
204#define PAACE_OTM_IMMEDIATE 0x01
205#define PAACE_OTM_INDEXED 0x02
206#define PAACE_OTM_RESERVED 0x03
207
208#define PAACE_M_COHERENCE_REQ 0x01
209
210#define PAACE_PID_0 0x0
211#define PAACE_PID_1 0x1
212#define PAACE_PID_2 0x2
213#define PAACE_PID_3 0x3
214#define PAACE_PID_4 0x4
215#define PAACE_PID_5 0x5
216#define PAACE_PID_6 0x6
217#define PAACE_PID_7 0x7
218
219#define PAACE_TCEF_FORMAT0_8B 0x00
220#define PAACE_TCEF_FORMAT1_RSVD 0x01
221/*
 * Hard coded value for the PAACT size to accommodate
223 * maximum LIODN value generated by u-boot.
224 */
225#define PAACE_NUMBER_ENTRIES 0x500
226/* Hard coded value for the SPAACT size */
227#define SPAACE_NUMBER_ENTRIES 0x800
228
229#define OME_NUMBER_ENTRIES 16
230
231/* PAACE Bit Field Defines */
232#define PPAACE_AF_WBAL 0xfffff000
233#define PPAACE_AF_WBAL_SHIFT 12
234#define PPAACE_AF_WSE 0x00000fc0
235#define PPAACE_AF_WSE_SHIFT 6
236#define PPAACE_AF_MW 0x00000020
237#define PPAACE_AF_MW_SHIFT 5
238
239#define SPAACE_AF_LIODN 0xffff0000
240#define SPAACE_AF_LIODN_SHIFT 16
241
242#define PAACE_AF_AP 0x00000018
243#define PAACE_AF_AP_SHIFT 3
244#define PAACE_AF_DD 0x00000004
245#define PAACE_AF_DD_SHIFT 2
246#define PAACE_AF_PT 0x00000002
247#define PAACE_AF_PT_SHIFT 1
248#define PAACE_AF_V 0x00000001
249#define PAACE_AF_V_SHIFT 0
250
251#define PAACE_DA_HOST_CR 0x80
252#define PAACE_DA_HOST_CR_SHIFT 7
253
254#define PAACE_IA_CID 0x00FF0000
255#define PAACE_IA_CID_SHIFT 16
256#define PAACE_IA_WCE 0x000000F0
257#define PAACE_IA_WCE_SHIFT 4
258#define PAACE_IA_ATM 0x0000000C
259#define PAACE_IA_ATM_SHIFT 2
260#define PAACE_IA_OTM 0x00000003
261#define PAACE_IA_OTM_SHIFT 0
262
263#define PAACE_WIN_TWBAL 0xfffff000
264#define PAACE_WIN_TWBAL_SHIFT 12
265#define PAACE_WIN_SWSE 0x00000fc0
266#define PAACE_WIN_SWSE_SHIFT 6
267
/* PAMU Data Structures */
/*
 * Primary / secondary PAACT entry.  The same 64-byte layout (offsets
 * 0x00-0x3F, see the offset comments below) serves both PPAACT and
 * SPAACT tables; fields valid only for primary entries are marked.
 */
struct paace {
    /* PAACE Offset 0x00 */
    u32 wbah;		/* window base address high; only valid for Primary PAACE */
    u32 addr_bitfields;	/* See P/S PAACE_AF_* */

    /* PAACE Offset 0x08 */
    /* Interpretation of first 32 bits dependent on DD above */
    union {
        struct {
            /* Destination ID, see PAACE_DID_* defines */
            u8 did;
            /* Partition ID */
            u8 pid;
            /* Snoop ID */
            u8 snpid;
            /* coherency_required : 1 reserved : 7 */
            u8 coherency_required; /* See PAACE_DA_* */
        } to_host;
        struct {
            /* Destination ID, see PAACE_DID_* defines */
            u8 did;
            u8 reserved1;
            u16 reserved2;
        } to_io;
    } domain_attr;

    /* Implementation attributes + window count + address & operation translation modes */
    u32 impl_attr;	/* See PAACE_IA_* */

    /* PAACE Offset 0x10 */
    /* Translated window base address */
    u32 twbah;
    u32 win_bitfields;	/* See PAACE_WIN_* */

    /* PAACE Offset 0x18 */
    /* first secondary paace entry */
    u32 fspi;		/* only valid for Primary PAACE */
    union {
        /* immediate operation encodings (PAACE_OTM_IMMEDIATE) */
        struct {
            u8 ioea;
            u8 moea;
            u8 ioeb;
            u8 moeb;
        } immed_ot;
        /* OMT index (PAACE_OTM_INDEXED) */
        struct {
            u16 reserved;
            u16 omi;
        } index_ot;
    } op_encode;

    /* PAACE Offsets 0x20-0x38 */
    u32 reserved[8];	/* not currently implemented */
};
323
324/* OME : Operation mapping entry
325 * MOE : Mapped Operation Encodings
 * The operation mapping table is a table containing operation mapping entries (OME).
327 * The index of a particular OME is programmed in the PAACE entry for translation
328 * in bound I/O operations corresponding to an LIODN. The OMT is used for translation
329 * specifically in case of the indexed translation mode. Each OME contains a 128
330 * byte mapped operation encoding (MOE), where each byte represents an MOE.
331 */
/* Number of mapped operation encodings (one byte each) per OME */
#define NUM_MOE 128
/* Operation mapping entry: 128 MOE bytes, see the comment block above */
struct ome {
    u8 moe[NUM_MOE];
} __attribute__((packed));
336
337#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
338#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
339#define OMT_SIZE (sizeof(struct ome) * OME_NUMBER_ENTRIES)
340
341#define PAMU_PAGE_SHIFT 12
342#define PAMU_PAGE_SIZE 4096ULL
343
344#define IOE_READ 0x00
345#define IOE_READ_IDX 0x00
346#define IOE_WRITE 0x81
347#define IOE_WRITE_IDX 0x01
348#define IOE_EREAD0 0x82 /* Enhanced read type 0 */
349#define IOE_EREAD0_IDX 0x02 /* Enhanced read type 0 */
350#define IOE_EWRITE0 0x83 /* Enhanced write type 0 */
351#define IOE_EWRITE0_IDX 0x03 /* Enhanced write type 0 */
352#define IOE_DIRECT0 0x84 /* Directive type 0 */
353#define IOE_DIRECT0_IDX 0x04 /* Directive type 0 */
354#define IOE_EREAD1 0x85 /* Enhanced read type 1 */
355#define IOE_EREAD1_IDX 0x05 /* Enhanced read type 1 */
356#define IOE_EWRITE1 0x86 /* Enhanced write type 1 */
357#define IOE_EWRITE1_IDX 0x06 /* Enhanced write type 1 */
358#define IOE_DIRECT1 0x87 /* Directive type 1 */
359#define IOE_DIRECT1_IDX 0x07 /* Directive type 1 */
360#define IOE_RAC 0x8c /* Read with Atomic clear */
361#define IOE_RAC_IDX 0x0c /* Read with Atomic clear */
362#define IOE_RAS 0x8d /* Read with Atomic set */
363#define IOE_RAS_IDX 0x0d /* Read with Atomic set */
364#define IOE_RAD 0x8e /* Read with Atomic decrement */
365#define IOE_RAD_IDX 0x0e /* Read with Atomic decrement */
366#define IOE_RAI 0x8f /* Read with Atomic increment */
367#define IOE_RAI_IDX 0x0f /* Read with Atomic increment */
368
369#define EOE_READ 0x00
370#define EOE_WRITE 0x01
371#define EOE_RAC 0x0c /* Read with Atomic clear */
372#define EOE_RAS 0x0d /* Read with Atomic set */
373#define EOE_RAD 0x0e /* Read with Atomic decrement */
374#define EOE_RAI 0x0f /* Read with Atomic increment */
375#define EOE_LDEC 0x10 /* Load external cache */
376#define EOE_LDECL 0x11 /* Load external cache with stash lock */
377#define EOE_LDECPE 0x12 /* Load external cache with preferred exclusive */
378#define EOE_LDECPEL 0x13 /* Load external cache with preferred exclusive and lock */
379#define EOE_LDECFE 0x14 /* Load external cache with forced exclusive */
380#define EOE_LDECFEL 0x15 /* Load external cache with forced exclusive and lock */
381#define EOE_RSA 0x16 /* Read with stash allocate */
382#define EOE_RSAU 0x17 /* Read with stash allocate and unlock */
383#define EOE_READI 0x18 /* Read with invalidate */
384#define EOE_RWNITC 0x19 /* Read with no intention to cache */
385#define EOE_WCI 0x1a /* Write cache inhibited */
386#define EOE_WWSA 0x1b /* Write with stash allocate */
387#define EOE_WWSAL 0x1c /* Write with stash allocate and lock */
388#define EOE_WWSAO 0x1d /* Write with stash allocate only */
389#define EOE_WWSAOL 0x1e /* Write with stash allocate only and lock */
390#define EOE_VALID 0x80
391
392/* Function prototypes */
393int pamu_domain_init(void);
394int pamu_enable_liodn(int liodn);
395int pamu_disable_liodn(int liodn);
396void pamu_free_subwins(int liodn);
397int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
398 u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
399 u32 subwin_cnt, int prot);
400int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
401 phys_addr_t subwin_size, u32 omi, unsigned long rpn,
402 uint32_t snoopid, u32 stashid, int enable, int prot);
403
404u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
405void get_ome_index(u32 *omi_index, struct device *dev);
406int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
407int pamu_disable_spaace(int liodn, u32 subwin);
408u32 pamu_get_max_subwin_cnt(void);
409
410#endif /* __FSL_PAMU_H */
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
new file mode 100644
index 000000000000..14d803a25e2f
--- /dev/null
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -0,0 +1,1172 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 * Author: Varun Sethi <varun.sethi@freescale.com>
17 *
18 */
19
20#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/iommu.h>
24#include <linux/notifier.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/interrupt.h>
30#include <linux/device.h>
31#include <linux/of_platform.h>
32#include <linux/bootmem.h>
33#include <linux/err.h>
34#include <asm/io.h>
35#include <asm/bitops.h>
36
37#include <asm/pci-bridge.h>
38#include <sysdev/fsl_pci.h>
39
40#include "fsl_pamu_domain.h"
41#include "pci.h"
42
43/*
44 * Global spinlock that needs to be held while
45 * configuring PAMU.
46 */
47static DEFINE_SPINLOCK(iommu_lock);
48
49static struct kmem_cache *fsl_pamu_domain_cache;
50static struct kmem_cache *iommu_devinfo_cache;
51static DEFINE_SPINLOCK(device_domain_lock);
52
53static int __init iommu_init_mempool(void)
54{
55
56 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
57 sizeof(struct fsl_dma_domain),
58 0,
59 SLAB_HWCACHE_ALIGN,
60
61 NULL);
62 if (!fsl_pamu_domain_cache) {
63 pr_debug("Couldn't create fsl iommu_domain cache\n");
64 return -ENOMEM;
65 }
66
67 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
68 sizeof(struct device_domain_info),
69 0,
70 SLAB_HWCACHE_ALIGN,
71 NULL);
72 if (!iommu_devinfo_cache) {
73 pr_debug("Couldn't create devinfo cache\n");
74 kmem_cache_destroy(fsl_pamu_domain_cache);
75 return -ENOMEM;
76 }
77
78 return 0;
79}
80
/*
 * Translate an iova within the domain's geometry to a physical address.
 * With multiple subwindows the subwindow covering the iova is located
 * first; the offset within the (sub)window is then added to its paddr.
 * Returns 0 when the domain has no configured windows/geometry or the
 * covering window is not valid.
 */
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
    u32 win_cnt = dma_domain->win_cnt;
    struct dma_window *win_ptr =
                &dma_domain->win_arr[0];
    struct iommu_domain_geometry *geom;

    geom = &dma_domain->iommu_domain->geometry;

    if (!win_cnt || !dma_domain->geom_size) {
        pr_debug("Number of windows/geometry not configured for the domain\n");
        return 0;
    }

    if (win_cnt > 1) {
        u64 subwin_size;
        dma_addr_t subwin_iova;
        u32 wnd;

        /*
         * Subwindows are equal-sized, power-of-two fractions of the
         * geometry: index = (aligned iova - aperture start) / size.
         */
        subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
        subwin_iova = iova & ~(subwin_size - 1);
        wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
        win_ptr = &dma_domain->win_arr[wnd];
    }

    if (win_ptr->valid)
        /* offset within the window + the window's physical base */
        return (win_ptr->paddr + (iova & (win_ptr->size - 1)));

    return 0;
}
111
112static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
113{
114 struct dma_window *sub_win_ptr =
115 &dma_domain->win_arr[0];
116 int i, ret;
117 unsigned long rpn, flags;
118
119 for (i = 0; i < dma_domain->win_cnt; i++) {
120 if (sub_win_ptr[i].valid) {
121 rpn = sub_win_ptr[i].paddr >>
122 PAMU_PAGE_SHIFT;
123 spin_lock_irqsave(&iommu_lock, flags);
124 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
125 sub_win_ptr[i].size,
126 ~(u32)0,
127 rpn,
128 dma_domain->snoop_id,
129 dma_domain->stash_id,
130 (i > 0) ? 1 : 0,
131 sub_win_ptr[i].prot);
132 spin_unlock_irqrestore(&iommu_lock, flags);
133 if (ret) {
134 pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
135 liodn);
136 return ret;
137 }
138 }
139 }
140
141 return ret;
142}
143
144static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
145{
146 int ret;
147 struct dma_window *wnd = &dma_domain->win_arr[0];
148 phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
149 unsigned long flags;
150
151 spin_lock_irqsave(&iommu_lock, flags);
152 ret = pamu_config_ppaace(liodn, wnd_addr,
153 wnd->size,
154 ~(u32)0,
155 wnd->paddr >> PAMU_PAGE_SHIFT,
156 dma_domain->snoop_id, dma_domain->stash_id,
157 0, wnd->prot);
158 spin_unlock_irqrestore(&iommu_lock, flags);
159 if (ret)
160 pr_debug("PAMU PAACE configuration failed for liodn %d\n",
161 liodn);
162
163 return ret;
164}
165
166/* Map the DMA window corresponding to the LIODN */
167static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
168{
169 if (dma_domain->win_cnt > 1)
170 return map_subwins(liodn, dma_domain);
171 else
172 return map_win(liodn, dma_domain);
173
174}
175
/* Update window/subwindow mapping for the LIODN */
/*
 * Rewrite the PAMU translation for window @wnd_nr of @dma_domain on
 * @liodn: the matching sPAACE entry when the domain has multiple
 * windows, otherwise the primary PAACE itself.  Returns 0 on success
 * or a pamu_config_*() error code.
 */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
    int ret;
    struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
    unsigned long flags;

    spin_lock_irqsave(&iommu_lock, flags);
    if (dma_domain->win_cnt > 1) {
        /* subwindow case: only entry wnd_nr is touched */
        ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
                     wnd->size,
                     ~(u32)0,
                     wnd->paddr >> PAMU_PAGE_SHIFT,
                     dma_domain->snoop_id,
                     dma_domain->stash_id,
                     (wnd_nr > 0) ? 1 : 0,
                     wnd->prot);
        if (ret)
            pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
    } else {
        phys_addr_t wnd_addr;

        /* single-window case: PAACE is based at the aperture start */
        wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

        ret = pamu_config_ppaace(liodn, wnd_addr,
                     wnd->size,
                     ~(u32)0,
                     wnd->paddr >> PAMU_PAGE_SHIFT,
                     dma_domain->snoop_id, dma_domain->stash_id,
                     0, wnd->prot);
        if (ret)
            pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
    }

    spin_unlock_irqrestore(&iommu_lock, flags);

    return ret;
}
214
215static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
216 u32 val)
217{
218 int ret = 0, i;
219 unsigned long flags;
220
221 spin_lock_irqsave(&iommu_lock, flags);
222 if (!dma_domain->win_arr) {
223 pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
224 spin_unlock_irqrestore(&iommu_lock, flags);
225 return -EINVAL;
226 }
227
228 for (i = 0; i < dma_domain->win_cnt; i++) {
229 ret = pamu_update_paace_stash(liodn, i, val);
230 if (ret) {
231 pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
232 spin_unlock_irqrestore(&iommu_lock, flags);
233 return ret;
234 }
235 }
236
237 spin_unlock_irqrestore(&iommu_lock, flags);
238
239 return ret;
240}
241
/*
 * Set the geometry parameters for a LIODN: program the primary PAACE
 * to cover the whole aperture and, when @win_cnt > 1, one disabled
 * sPAACE per equal-sized subwindow.  Entries are configured with
 * rpn 0 and prot 0 here; actual mappings come later via map_*() /
 * update_liodn().  Returns 0 or a pamu_* error code.
 */
static int pamu_set_liodn(int liodn, struct device *dev,
               struct fsl_dma_domain *dma_domain,
               struct iommu_domain_geometry *geom_attr,
               u32 win_cnt)
{
    phys_addr_t window_addr, window_size;
    phys_addr_t subwin_size;
    int ret = 0, i;
    u32 omi_index = ~(u32)0;	/* all-ones until get_ome_index() fills it in */
    unsigned long flags;

    /*
     * Configure the omi_index at the geometry setup time.
     * This is a static value which depends on the type of
     * device and would not change thereafter.
     */
    get_ome_index(&omi_index, dev);

    window_addr = geom_attr->aperture_start;
    window_size = dma_domain->geom_size;

    /* disable the LIODN before reprogramming its primary PAACE */
    spin_lock_irqsave(&iommu_lock, flags);
    ret = pamu_disable_liodn(liodn);
    if (!ret)
        ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
                     0, dma_domain->snoop_id,
                     dma_domain->stash_id, win_cnt, 0);
    spin_unlock_irqrestore(&iommu_lock, flags);
    if (ret) {
        pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
        return ret;
    }

    if (win_cnt > 1) {
        /* subwindows are equal power-of-two fractions of the window */
        subwin_size = window_size >> ilog2(win_cnt);
        for (i = 0; i < win_cnt; i++) {
            spin_lock_irqsave(&iommu_lock, flags);
            ret = pamu_disable_spaace(liodn, i);
            if (!ret)
                ret = pamu_config_spaace(liodn, win_cnt, i,
                             subwin_size, omi_index,
                             0, dma_domain->snoop_id,
                             dma_domain->stash_id,
                             0, 0);
            spin_unlock_irqrestore(&iommu_lock, flags);
            if (ret) {
                pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
                return ret;
            }
        }
    }

    return ret;
}
297
298static int check_size(u64 size, dma_addr_t iova)
299{
300 /*
301 * Size must be a power of two and at least be equal
302 * to PAMU page size.
303 */
304 if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
305 pr_debug("%s: size too small or not a power of two\n", __func__);
306 return -EINVAL;
307 }
308
309 /* iova must be page size aligned*/
310 if (iova & (size - 1)) {
311 pr_debug("%s: address is not aligned with window size\n", __func__);
312 return -EINVAL;
313 }
314
315 return 0;
316}
317
318static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
319{
320 struct fsl_dma_domain *domain;
321
322 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
323 if (!domain)
324 return NULL;
325
326 domain->stash_id = ~(u32)0;
327 domain->snoop_id = ~(u32)0;
328 domain->win_cnt = pamu_get_max_subwin_cnt();
329 domain->geom_size = 0;
330
331 INIT_LIST_HEAD(&domain->devices);
332
333 spin_lock_init(&domain->domain_lock);
334
335 return domain;
336}
337
/* Return the device's attachment info, or NULL if not attached to a domain */
static inline struct device_domain_info *find_domain(struct device *dev)
{
	return dev->archdata.iommu_domain;
}
342
/*
 * Drop one device entry from its domain: unlink it from the domain
 * device list, tear down its PAMU state and free the info structure.
 * Called with the owning domain's domain_lock held (see detach_device()).
 */
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		/* Subwindows were in use for this LIODN - release them */
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
358
/*
 * Remove device entries from the domain's device list.  If @dev is
 * NULL every entry is removed; otherwise only the entries matching
 * @dev (a device may contribute several LIODN entries).
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
372
373static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
374{
375 struct device_domain_info *info, *old_domain_info;
376 unsigned long flags;
377
378 spin_lock_irqsave(&device_domain_lock, flags);
379 /*
380 * Check here if the device is already attached to domain or not.
381 * If the device is already attached to a domain detach it.
382 */
383 old_domain_info = find_domain(dev);
384 if (old_domain_info && old_domain_info->domain != dma_domain) {
385 spin_unlock_irqrestore(&device_domain_lock, flags);
386 detach_device(dev, old_domain_info->domain);
387 spin_lock_irqsave(&device_domain_lock, flags);
388 }
389
390 info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
391
392 info->dev = dev;
393 info->liodn = liodn;
394 info->domain = dma_domain;
395
396 list_add(&info->link, &dma_domain->devices);
397 /*
398 * In case of devices with multiple LIODNs just store
399 * the info for the first LIODN as all
400 * LIODNs share the same domain
401 */
402 if (!old_domain_info)
403 dev->archdata.iommu_domain = info;
404 spin_unlock_irqrestore(&device_domain_lock, flags);
405
406}
407
408static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
409 dma_addr_t iova)
410{
411 struct fsl_dma_domain *dma_domain = domain->priv;
412
413 if ((iova < domain->geometry.aperture_start) ||
414 iova > (domain->geometry.aperture_end))
415 return 0;
416
417 return get_phys_addr(dma_domain, iova);
418}
419
/* Only cache coherency is reported; all other capabilities are absent */
static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}
425
426static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
427{
428 struct fsl_dma_domain *dma_domain = domain->priv;
429
430 domain->priv = NULL;
431
432 /* remove all the devices from the device list */
433 detach_device(NULL, dma_domain);
434
435 dma_domain->enabled = 0;
436 dma_domain->mapped = 0;
437
438 kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
439}
440
/*
 * iommu_ops domain_init callback: allocate the PAMU-private domain
 * state and install the default aperture.  Returns -ENOMEM on
 * allocation failure.
 */
static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return -ENOMEM;
	}
	domain->priv = dma_domain;
	dma_domain->iommu_domain = domain;
	/* default geometry 64 GB i.e. maximum system address */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = (1ULL << 36) - 1;
	domain->geometry.force_aperture = true;

	return 0;
}
459
460/* Configure geometry settings for all LIODNs associated with domain */
461static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
462 struct iommu_domain_geometry *geom_attr,
463 u32 win_cnt)
464{
465 struct device_domain_info *info;
466 int ret = 0;
467
468 list_for_each_entry(info, &dma_domain->devices, link) {
469 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
470 geom_attr, win_cnt);
471 if (ret)
472 break;
473 }
474
475 return ret;
476}
477
478/* Update stash destination for all LIODNs associated with the domain */
479static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
480{
481 struct device_domain_info *info;
482 int ret = 0;
483
484 list_for_each_entry(info, &dma_domain->devices, link) {
485 ret = update_liodn_stash(info->liodn, dma_domain, val);
486 if (ret)
487 break;
488 }
489
490 return ret;
491}
492
493/* Update domain mappings for all LIODNs associated with the domain */
494static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
495{
496 struct device_domain_info *info;
497 int ret = 0;
498
499 list_for_each_entry(info, &dma_domain->devices, link) {
500 ret = update_liodn(info->liodn, dma_domain, wnd_nr);
501 if (ret)
502 break;
503 }
504 return ret;
505}
506
/*
 * Disable window @wnd_nr for every LIODN in the domain.  With a
 * single window the whole LIODN is disabled (and the domain marked
 * disabled); with subwindows only the matching SPAACE is disabled.
 * Returns the status of the last LIODN processed.
 */
static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			/* Single-window domain: disabling it disables DMA entirely */
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}
524
525static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
526{
527 struct fsl_dma_domain *dma_domain = domain->priv;
528 unsigned long flags;
529 int ret;
530
531 spin_lock_irqsave(&dma_domain->domain_lock, flags);
532 if (!dma_domain->win_arr) {
533 pr_debug("Number of windows not configured\n");
534 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
535 return;
536 }
537
538 if (wnd_nr >= dma_domain->win_cnt) {
539 pr_debug("Invalid window index\n");
540 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
541 return;
542 }
543
544 if (dma_domain->win_arr[wnd_nr].valid) {
545 ret = disable_domain_win(dma_domain, wnd_nr);
546 if (!ret) {
547 dma_domain->win_arr[wnd_nr].valid = 0;
548 dma_domain->mapped--;
549 }
550 }
551
552 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
553
554}
555
556static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
557 phys_addr_t paddr, u64 size, int prot)
558{
559 struct fsl_dma_domain *dma_domain = domain->priv;
560 struct dma_window *wnd;
561 int pamu_prot = 0;
562 int ret;
563 unsigned long flags;
564 u64 win_size;
565
566 if (prot & IOMMU_READ)
567 pamu_prot |= PAACE_AP_PERMS_QUERY;
568 if (prot & IOMMU_WRITE)
569 pamu_prot |= PAACE_AP_PERMS_UPDATE;
570
571 spin_lock_irqsave(&dma_domain->domain_lock, flags);
572 if (!dma_domain->win_arr) {
573 pr_debug("Number of windows not configured\n");
574 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
575 return -ENODEV;
576 }
577
578 if (wnd_nr >= dma_domain->win_cnt) {
579 pr_debug("Invalid window index\n");
580 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
581 return -EINVAL;
582 }
583
584 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
585 if (size > win_size) {
586 pr_debug("Invalid window size \n");
587 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
588 return -EINVAL;
589 }
590
591 if (dma_domain->win_cnt == 1) {
592 if (dma_domain->enabled) {
593 pr_debug("Disable the window before updating the mapping\n");
594 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
595 return -EBUSY;
596 }
597
598 ret = check_size(size, domain->geometry.aperture_start);
599 if (ret) {
600 pr_debug("Aperture start not aligned to the size\n");
601 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
602 return -EINVAL;
603 }
604 }
605
606 wnd = &dma_domain->win_arr[wnd_nr];
607 if (!wnd->valid) {
608 wnd->paddr = paddr;
609 wnd->size = size;
610 wnd->prot = pamu_prot;
611
612 ret = update_domain_mapping(dma_domain, wnd_nr);
613 if (!ret) {
614 wnd->valid = 1;
615 dma_domain->mapped++;
616 }
617 } else {
618 pr_debug("Disable the window before updating the mapping\n");
619 ret = -EBUSY;
620 }
621
622 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
623
624 return ret;
625}
626
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.  Called with the LIODN list from the
 * "fsl,liodn" property; returns 0 or the first error encountered.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {

		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %s\n",
				 liodn[i], dev->of_node->full_name);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			/* win_cnt of 0 requests a single window without subwindows */
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry,
					     win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
679
/*
 * iommu_ops attach_dev callback.  For PCI devices the PCI
 * controller's device node supplies the LIODN, since that is what
 * u-boot programmed; otherwise the device's own "fsl,liodn"
 * property is used.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev->bus == &pci_bus_type) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		/* The property is an array of u32 LIODNs */
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev,
					   liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
		ret = -EINVAL;
	}

	return ret;
}
718
/*
 * iommu_ops detach_dev callback.  Mirrors fsl_pamu_attach_device:
 * for PCI devices the PCI controller's node is consulted for the
 * "fsl,liodn" property before detaching.
 */
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev->bus == &pci_bus_type) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
}
750
751static int configure_domain_geometry(struct iommu_domain *domain, void *data)
752{
753 struct iommu_domain_geometry *geom_attr = data;
754 struct fsl_dma_domain *dma_domain = domain->priv;
755 dma_addr_t geom_size;
756 unsigned long flags;
757
758 geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
759 /*
760 * Sanity check the geometry size. Also, we do not support
761 * DMA outside of the geometry.
762 */
763 if (check_size(geom_size, geom_attr->aperture_start) ||
764 !geom_attr->force_aperture) {
765 pr_debug("Invalid PAMU geometry attributes\n");
766 return -EINVAL;
767 }
768
769 spin_lock_irqsave(&dma_domain->domain_lock, flags);
770 if (dma_domain->enabled) {
771 pr_debug("Can't set geometry attributes as domain is active\n");
772 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
773 return -EBUSY;
774 }
775
776 /* Copy the domain geometry information */
777 memcpy(&domain->geometry, geom_attr,
778 sizeof(struct iommu_domain_geometry));
779 dma_domain->geom_size = geom_size;
780
781 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
782
783 return 0;
784}
785
/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	/* Remember the raw attributes so domain_get_attr can report them */
	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	/* Resolve (cache level, cpu) to a PAMU stash target id */
	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/* Propagate the new stash id to every LIODN in the domain */
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
812
/* Configure domain dma state i.e. enable/disable DMA*/
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	/* Enabling requires at least one valid window mapping */
	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices,
			    link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			/*
			 * Per-LIODN failures are only logged; the function
			 * deliberately returns 0 below (best effort).
			 */
			pr_debug("Unable to set dma state for liodn %d",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
841
842static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
843 enum iommu_attr attr_type, void *data)
844{
845 struct fsl_dma_domain *dma_domain = domain->priv;
846 int ret = 0;
847
848
849 switch (attr_type) {
850 case DOMAIN_ATTR_GEOMETRY:
851 ret = configure_domain_geometry(domain, data);
852 break;
853 case DOMAIN_ATTR_FSL_PAMU_STASH:
854 ret = configure_domain_stash(dma_domain, data);
855 break;
856 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
857 ret = configure_domain_dma_state(dma_domain, *(int *)data);
858 break;
859 default:
860 pr_debug("Unsupported attribute type\n");
861 ret = -EINVAL;
862 break;
863 };
864
865 return ret;
866}
867
868static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
869 enum iommu_attr attr_type, void *data)
870{
871 struct fsl_dma_domain *dma_domain = domain->priv;
872 int ret = 0;
873
874
875 switch (attr_type) {
876 case DOMAIN_ATTR_FSL_PAMU_STASH:
877 memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
878 sizeof(struct pamu_stash_attribute));
879 break;
880 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
881 *(int *)data = dma_domain->enabled;
882 break;
883 case DOMAIN_ATTR_FSL_PAMUV1:
884 *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
885 break;
886 default:
887 pr_debug("Unsupported attribute type\n");
888 ret = -EINVAL;
889 break;
890 };
891
892 return ret;
893}
894
895#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
896
/* Return the device's existing iommu group, or allocate a fresh one */
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	return group ? group : iommu_group_alloc();
}
907
908static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
909{
910 u32 version;
911
912 /* Check the PCI controller version number by readding BRR1 register */
913 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
914 version &= PCI_FSL_BRR1_VER;
915 /* If PCI controller version is >= 0x204 we can partition endpoints*/
916 if (version >= 0x204)
917 return 1;
918
919 return 0;
920}
921
922/* Get iommu group information from peer devices or devices on the parent bus */
923static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
924{
925 struct pci_dev *tmp;
926 struct iommu_group *group;
927 struct pci_bus *bus = pdev->bus;
928
929 /*
930 * Traverese the pci bus device list to get
931 * the shared iommu group.
932 */
933 while (bus) {
934 list_for_each_entry(tmp, &bus->devices, bus_list) {
935 if (tmp == pdev)
936 continue;
937 group = iommu_group_get(&tmp->dev);
938 if (group)
939 return group;
940 }
941
942 bus = bus->parent;
943 }
944
945 return NULL;
946}
947
/*
 * Work out the iommu group for a PCI device.
 *
 * When the controller supports endpoint partitioning (version >=
 * 0x204) the device's DMA alias is resolved (upstream bridges,
 * quirks, ACS isolation up to the root bus) and a per-alias group is
 * used.  Otherwise every device behind the controller shares one
 * group, taken either from the controller's device node or from a
 * sibling device.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partioning;
	struct iommu_group *group = NULL;
	struct pci_dev *bridge, *dma_pdev = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partioning) {
		bridge = pci_find_upstream_pcie_bridge(pdev);
		if (bridge) {
			if (pci_is_pcie(bridge))
				dma_pdev = pci_get_domain_bus_and_slot(
						pci_domain_nr(pdev->bus),
						bridge->subordinate->number, 0);
			if (!dma_pdev)
				dma_pdev = pci_dev_get(bridge);
		} else
			dma_pdev = pci_dev_get(pdev);

		/* Account for quirked devices */
		swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));

		/*
		 * If it's a multifunction device that does not support our
		 * required ACS flags, add to the same group as lowest numbered
		 * function that also does not support the required ACS flags.
		 */
		if (dma_pdev->multifunction &&
		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
			u8 i, slot = PCI_SLOT(dma_pdev->devfn);

			for (i = 0; i < 8; i++) {
				struct pci_dev *tmp;

				tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
				if (!tmp)
					continue;

				if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
					swap_pci_ref(&dma_pdev, tmp);
					break;
				}
				pci_dev_put(tmp);
			}
		}

		/*
		 * Devices on the root bus go through the iommu. If that's not us,
		 * find the next upstream device and test ACS up to the root bus.
		 * Finding the next device may require skipping virtual buses.
		 */
		while (!pci_is_root_bus(dma_pdev->bus)) {
			struct pci_bus *bus = dma_pdev->bus;

			while (!bus->self) {
				if (!pci_is_root_bus(bus))
					bus = bus->parent;
				else
					goto root_bus;
			}

			if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
				break;

			swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
		}

root_bus:
		group = get_device_iommu_group(&dma_pdev->dev);
		pci_dev_put(dma_pdev);
		/*
		 * PCIe controller is not a partitionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else
			group = get_shared_pci_device_group(pdev);
	}

	return group;
}
1047
/*
 * iommu_ops add_device callback: place the device in an iommu group.
 * PCI devices get a group derived from the PCI topology; platform
 * devices with an "fsl,liodn" property get a group of their own.
 */
static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group = NULL;
	struct pci_dev *pdev;
	const u32 *prop;
	int ret, len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev->bus == &pci_bus_type) {
		pdev = to_pci_dev(dev);
		/* Don't create device groups for virtual PCI bridges */
		if (pdev->subordinate)
			return 0;

		group = get_pci_device_group(pdev);

	} else {
		prop = of_get_property(dev->of_node, "fsl,liodn", &len);
		if (prop)
			group = get_device_iommu_group(dev);
	}

	/*
	 * NOTE(review): when group is NULL (e.g. platform device without
	 * an "fsl,liodn" property) PTR_ERR(NULL) evaluates to 0, so this
	 * reports success without adding the device - confirm intended.
	 */
	if (!group || IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}
1081
/* iommu_ops remove_device callback: drop the device from its group */
static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
1086
1087static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
1088{
1089 struct fsl_dma_domain *dma_domain = domain->priv;
1090 unsigned long flags;
1091 int ret;
1092
1093 spin_lock_irqsave(&dma_domain->domain_lock, flags);
1094 /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
1095 if (dma_domain->enabled) {
1096 pr_debug("Can't set geometry attributes as domain is active\n");
1097 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1098 return -EBUSY;
1099 }
1100
1101 /* Ensure that the geometry has been set for the domain */
1102 if (!dma_domain->geom_size) {
1103 pr_debug("Please configure geometry before setting the number of windows\n");
1104 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1105 return -EINVAL;
1106 }
1107
1108 /*
1109 * Ensure we have valid window count i.e. it should be less than
1110 * maximum permissible limit and should be a power of two.
1111 */
1112 if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
1113 pr_debug("Invalid window count\n");
1114 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1115 return -EINVAL;
1116 }
1117
1118 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
1119 ((w_count > 1) ? w_count : 0));
1120 if (!ret) {
1121 if (dma_domain->win_arr)
1122 kfree(dma_domain->win_arr);
1123 dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
1124 w_count, GFP_ATOMIC);
1125 if (!dma_domain->win_arr) {
1126 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1127 return -ENOMEM;
1128 }
1129 dma_domain->win_cnt = w_count;
1130 }
1131 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1132
1133 return ret;
1134}
1135
/* iommu_ops domain_get_windows callback: report the configured window count */
static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	return dma_domain->win_cnt;
}
1142
/* PAMU implementation of the IOMMU API */
static struct iommu_ops fsl_pamu_ops = {
	.domain_init	= fsl_pamu_domain_init,
	.domain_destroy = fsl_pamu_domain_destroy,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_has_cap = fsl_pamu_domain_has_cap,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
};
1159
1160int pamu_domain_init()
1161{
1162 int ret = 0;
1163
1164 ret = iommu_init_mempool();
1165 if (ret)
1166 return ret;
1167
1168 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
1169 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
1170
1171 return ret;
1172}
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
new file mode 100644
index 000000000000..c90293f99709
--- /dev/null
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -0,0 +1,85 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2013 Freescale Semiconductor, Inc.
16 *
17 */
18
19#ifndef __FSL_PAMU_DOMAIN_H
20#define __FSL_PAMU_DOMAIN_H
21
22#include "fsl_pamu.h"
23
/* One DMA window (or subwindow) mapping within the domain geometry */
struct dma_window {
	phys_addr_t paddr;	/* physical base address the window maps to */
	u64 size;		/* window size in bytes */
	int valid;		/* non-zero once the mapping has been programmed */
	int prot;		/* PAACE_AP_PERMS_* access permissions */
};
30
struct fsl_dma_domain {
	/*
	 * Indicates the geometry size for the domain.
	 * This would be set when the geometry is
	 * configured for the domain.
	 */
	dma_addr_t			geom_size;
	/*
	 * Number of windows associated with this domain.
	 * During domain initialization, it is set to the
	 * maximum number of subwindows allowed for a LIODN.
	 * Minimum value for this is 1 indicating a single PAMU
	 * window, without any sub windows. Value can be set/
	 * queried by set_attr/get_attr API for DOMAIN_ATTR_WINDOWS.
	 * Value can only be set once the geometry has been configured.
	 */
	u32				win_cnt;
	/*
	 * win_arr contains information of the configured
	 * windows for a domain. This is allocated only
	 * when the number of windows for the domain are
	 * set.
	 */
	struct dma_window		*win_arr;
	/* list of devices associated with the domain */
	struct list_head		devices;
	/* dma_domain states:
	 * mapped - A particular mapping has been created
	 * within the configured geometry.
	 * enabled - DMA has been enabled for the given
	 * domain. This translates to setting of the
	 * valid bit for the primary PAACE in the PAMU
	 * PAACT table. Domain geometry should be set and
	 * it must have a valid mapping before DMA can be
	 * enabled for it.
	 *
	 */
	int				mapped;
	int				enabled;
	/* stash_id obtained from the stash attribute details */
	u32				stash_id;
	/* raw stash attributes, reported back via DOMAIN_ATTR_FSL_PAMU_STASH */
	struct pamu_stash_attribute	dma_stash;
	u32				snoop_id;
	/* back-pointer to the generic iommu_domain owning this state */
	struct iommu_domain		*iommu_domain;
	/* protects the device list and the window/geometry state above */
	spinlock_t			domain_lock;
};
77
/* domain-device relationship: one entry per (device, LIODN) attachment */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct device *dev;
	u32 liodn;		/* LIODN this entry represents */
	struct fsl_dma_domain *domain; /* pointer to domain */
};
85#endif /* __FSL_PAMU_DOMAIN_H */