-rw-r--r--   arch/powerpc/include/asm/fsl_pamu_stash.h |   4
-rw-r--r--   drivers/iommu/fsl_pamu.c                  | 203
-rw-r--r--   drivers/iommu/fsl_pamu.h                  |  15
-rw-r--r--   drivers/iommu/fsl_pamu_domain.c           | 173
4 files changed, 179 insertions, 216 deletions
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
index caa1b21c25cd..38311c98eed9 100644
--- a/arch/powerpc/include/asm/fsl_pamu_stash.h
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -32,8 +32,8 @@ enum pamu_stash_target {
32 | */ | 32 | */ |
33 | 33 | ||
34 | struct pamu_stash_attribute { | 34 | struct pamu_stash_attribute { |
35 | u32 cpu; /* cpu number */ | 35 | u32 cpu; /* cpu number */ |
36 | u32 cache; /* cache to stash to: L1,L2,L3 */ | 36 | u32 cache; /* cache to stash to: L1,L2,L3 */ |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #endif /* __FSL_PAMU_STASH_H */ | 39 | #endif /* __FSL_PAMU_STASH_H */ |
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 6ee947e41870..abeedc9a78c2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -18,22 +18,13 @@
18 | 18 | ||
19 | #define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__ | 19 | #define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__ |
20 | 20 | ||
21 | #include <linux/init.h> | 21 | #include "fsl_pamu.h" |
22 | #include <linux/iommu.h> | 22 | |
23 | #include <linux/slab.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
28 | #include <linux/device.h> | ||
29 | #include <linux/of_platform.h> | ||
30 | #include <linux/bootmem.h> | ||
31 | #include <linux/genalloc.h> | 24 | #include <linux/genalloc.h> |
32 | #include <asm/io.h> | ||
33 | #include <asm/bitops.h> | ||
34 | #include <asm/fsl_guts.h> | ||
35 | 25 | ||
36 | #include "fsl_pamu.h" | 26 | #include <asm/mpc85xx.h> |
27 | #include <asm/fsl_guts.h> | ||
37 | 28 | ||
38 | /* define indexes for each operation mapping scenario */ | 29 | /* define indexes for each operation mapping scenario */ |
39 | #define OMI_QMAN 0x00 | 30 | #define OMI_QMAN 0x00 |
@@ -44,13 +35,13 @@
44 | #define make64(high, low) (((u64)(high) << 32) | (low)) | 35 | #define make64(high, low) (((u64)(high) << 32) | (low)) |
45 | 36 | ||
46 | struct pamu_isr_data { | 37 | struct pamu_isr_data { |
47 | void __iomem *pamu_reg_base; /* Base address of PAMU regs*/ | 38 | void __iomem *pamu_reg_base; /* Base address of PAMU regs */ |
48 | unsigned int count; /* The number of PAMUs */ | 39 | unsigned int count; /* The number of PAMUs */ |
49 | }; | 40 | }; |
50 | 41 | ||
51 | static struct paace *ppaact; | 42 | static struct paace *ppaact; |
52 | static struct paace *spaact; | 43 | static struct paace *spaact; |
53 | static struct ome *omt; | 44 | static struct ome *omt __initdata; |
54 | 45 | ||
55 | /* | 46 | /* |
56 | * Table for matching compatible strings, for device tree | 47 | * Table for matching compatible strings, for device tree |
@@ -58,14 +49,13 @@ static struct ome *omt;
58 | * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4 | 49 | * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4 |
59 | * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" | 50 | * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" |
60 | * string would be used. | 51 | * string would be used. |
61 | */ | 52 | */ |
62 | static const struct of_device_id guts_device_ids[] = { | 53 | static const struct of_device_id guts_device_ids[] __initconst = { |
63 | { .compatible = "fsl,qoriq-device-config-1.0", }, | 54 | { .compatible = "fsl,qoriq-device-config-1.0", }, |
64 | { .compatible = "fsl,qoriq-device-config-2.0", }, | 55 | { .compatible = "fsl,qoriq-device-config-2.0", }, |
65 | {} | 56 | {} |
66 | }; | 57 | }; |
67 | 58 | ||
68 | |||
69 | /* | 59 | /* |
70 | * Table for matching compatible strings, for device tree | 60 | * Table for matching compatible strings, for device tree |
71 | * L3 cache controller node. | 61 | * L3 cache controller node. |
@@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = {
73 | * "fsl,b4860-l3-cache-controller" corresponds to B4 & | 63 | * "fsl,b4860-l3-cache-controller" corresponds to B4 & |
74 | * "fsl,p4080-l3-cache-controller" corresponds to other, | 64 | * "fsl,p4080-l3-cache-controller" corresponds to other, |
75 | * SOCs. | 65 | * SOCs. |
76 | */ | 66 | */ |
77 | static const struct of_device_id l3_device_ids[] = { | 67 | static const struct of_device_id l3_device_ids[] = { |
78 | { .compatible = "fsl,t4240-l3-cache-controller", }, | 68 | { .compatible = "fsl,t4240-l3-cache-controller", }, |
79 | { .compatible = "fsl,b4860-l3-cache-controller", }, | 69 | { .compatible = "fsl,b4860-l3-cache-controller", }, |
@@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = {
85 | static u32 max_subwindow_count; | 75 | static u32 max_subwindow_count; |
86 | 76 | ||
87 | /* Pool for fspi allocation */ | 77 | /* Pool for fspi allocation */ |
88 | struct gen_pool *spaace_pool; | 78 | static struct gen_pool *spaace_pool; |
89 | 79 | ||
90 | /** | 80 | /** |
91 | * pamu_get_max_subwin_cnt() - Return the maximum supported | 81 | * pamu_get_max_subwin_cnt() - Return the maximum supported |
@@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn)
170 | static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) | 160 | static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) |
171 | { | 161 | { |
172 | /* Bug if not a power of 2 */ | 162 | /* Bug if not a power of 2 */ |
173 | BUG_ON((addrspace_size & (addrspace_size - 1))); | 163 | BUG_ON(addrspace_size & (addrspace_size - 1)); |
174 | 164 | ||
175 | /* window size is 2^(WSE+1) bytes */ | 165 | /* window size is 2^(WSE+1) bytes */ |
176 | return fls64(addrspace_size) - 2; | 166 | return fls64(addrspace_size) - 2; |
@@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
179 | /* Derive the PAACE window count encoding for the subwindow count */ | 169 | /* Derive the PAACE window count encoding for the subwindow count */ |
180 | static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) | 170 | static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) |
181 | { | 171 | { |
182 | /* window count is 2^(WCE+1) bytes */ | 172 | /* window count is 2^(WCE+1) bytes */ |
183 | return __ffs(subwindow_cnt) - 1; | 173 | return __ffs(subwindow_cnt) - 1; |
184 | } | 174 | } |
185 | 175 | ||
186 | /* | 176 | /* |
@@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
241 | * If no SPAACE entry is available or the allocator can not reserve the required | 231 | * If no SPAACE entry is available or the allocator can not reserve the required |
242 | * number of contiguous entries function returns ULONG_MAX indicating a failure. | 232 | * number of contiguous entries function returns ULONG_MAX indicating a failure. |
243 | * | 233 | * |
244 | */ | 234 | */ |
245 | static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) | 235 | static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) |
246 | { | 236 | { |
247 | unsigned long spaace_addr; | 237 | unsigned long spaace_addr; |
@@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
288 | } | 278 | } |
289 | if (subwin) { | 279 | if (subwin) { |
290 | paace = pamu_get_spaace(paace, subwin - 1); | 280 | paace = pamu_get_spaace(paace, subwin - 1); |
291 | if (!paace) { | 281 | if (!paace) |
292 | return -ENOENT; | 282 | return -ENOENT; |
293 | } | ||
294 | } | 283 | } |
295 | set_bf(paace->impl_attr, PAACE_IA_CID, value); | 284 | set_bf(paace->impl_attr, PAACE_IA_CID, value); |
296 | 285 | ||
@@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin)
311 | } | 300 | } |
312 | if (subwin) { | 301 | if (subwin) { |
313 | paace = pamu_get_spaace(paace, subwin - 1); | 302 | paace = pamu_get_spaace(paace, subwin - 1); |
314 | if (!paace) { | 303 | if (!paace) |
315 | return -ENOENT; | 304 | return -ENOENT; |
316 | } | 305 | set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID); |
317 | set_bf(paace->addr_bitfields, PAACE_AF_V, | ||
318 | PAACE_V_INVALID); | ||
319 | } else { | 306 | } else { |
320 | set_bf(paace->addr_bitfields, PAACE_AF_AP, | 307 | set_bf(paace->addr_bitfields, PAACE_AF_AP, |
321 | PAACE_AP_PERMS_DENIED); | 308 | PAACE_AP_PERMS_DENIED); |
322 | } | 309 | } |
323 | 310 | ||
324 | mb(); | 311 | mb(); |
@@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin)
326 | return 0; | 313 | return 0; |
327 | } | 314 | } |
328 | 315 | ||
329 | |||
330 | /** | 316 | /** |
331 | * pamu_config_paace() - Sets up PPAACE entry for specified liodn | 317 | * pamu_config_paace() - Sets up PPAACE entry for specified liodn |
332 | * | 318 | * |
@@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
352 | unsigned long fspi; | 338 | unsigned long fspi; |
353 | 339 | ||
354 | if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { | 340 | if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { |
355 | pr_debug("window size too small or not a power of two %pa\n", &win_size); | 341 | pr_debug("window size too small or not a power of two %pa\n", |
342 | &win_size); | ||
356 | return -EINVAL; | 343 | return -EINVAL; |
357 | } | 344 | } |
358 | 345 | ||
@@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
362 | } | 349 | } |
363 | 350 | ||
364 | ppaace = pamu_get_ppaace(liodn); | 351 | ppaace = pamu_get_ppaace(liodn); |
365 | if (!ppaace) { | 352 | if (!ppaace) |
366 | return -ENOENT; | 353 | return -ENOENT; |
367 | } | ||
368 | 354 | ||
369 | /* window size is 2^(WSE+1) bytes */ | 355 | /* window size is 2^(WSE+1) bytes */ |
370 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, | 356 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, |
371 | map_addrspace_size_to_wse(win_size)); | 357 | map_addrspace_size_to_wse(win_size)); |
372 | 358 | ||
373 | pamu_init_ppaace(ppaace); | 359 | pamu_init_ppaace(ppaace); |
374 | 360 | ||
@@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
442 | { | 428 | { |
443 | struct paace *paace; | 429 | struct paace *paace; |
444 | 430 | ||
445 | |||
446 | /* setup sub-windows */ | 431 | /* setup sub-windows */ |
447 | if (!subwin_cnt) { | 432 | if (!subwin_cnt) { |
448 | pr_debug("Invalid subwindow count\n"); | 433 | pr_debug("Invalid subwindow count\n"); |
@@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
510 | } | 495 | } |
511 | 496 | ||
512 | /** | 497 | /** |
513 | * get_ome_index() - Returns the index in the operation mapping table | 498 | * get_ome_index() - Returns the index in the operation mapping table |
514 | * for device. | 499 | * for device. |
515 | * @*omi_index: pointer for storing the index value | 500 | * @*omi_index: pointer for storing the index value |
516 | * | 501 | * |
517 | */ | 502 | */ |
518 | void get_ome_index(u32 *omi_index, struct device *dev) | 503 | void get_ome_index(u32 *omi_index, struct device *dev) |
519 | { | 504 | { |
520 | if (of_device_is_compatible(dev->of_node, "fsl,qman-portal")) | 505 | if (of_device_is_compatible(dev->of_node, "fsl,qman-portal")) |
@@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
544 | if (stash_dest_hint == PAMU_ATTR_CACHE_L3) { | 529 | if (stash_dest_hint == PAMU_ATTR_CACHE_L3) { |
545 | node = of_find_matching_node(NULL, l3_device_ids); | 530 | node = of_find_matching_node(NULL, l3_device_ids); |
546 | if (node) { | 531 | if (node) { |
547 | prop = of_get_property(node, "cache-stash-id", 0); | 532 | prop = of_get_property(node, "cache-stash-id", NULL); |
548 | if (!prop) { | 533 | if (!prop) { |
549 | pr_debug("missing cache-stash-id at %s\n", node->full_name); | 534 | pr_debug("missing cache-stash-id at %s\n", |
535 | node->full_name); | ||
550 | of_node_put(node); | 536 | of_node_put(node); |
551 | return ~(u32)0; | 537 | return ~(u32)0; |
552 | } | 538 | } |
@@ -570,9 +556,10 @@ found_cpu_node:
570 | /* find the hwnode that represents the cache */ | 556 | /* find the hwnode that represents the cache */ |
571 | for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) { | 557 | for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) { |
572 | if (stash_dest_hint == cache_level) { | 558 | if (stash_dest_hint == cache_level) { |
573 | prop = of_get_property(node, "cache-stash-id", 0); | 559 | prop = of_get_property(node, "cache-stash-id", NULL); |
574 | if (!prop) { | 560 | if (!prop) { |
575 | pr_debug("missing cache-stash-id at %s\n", node->full_name); | 561 | pr_debug("missing cache-stash-id at %s\n", |
562 | node->full_name); | ||
576 | of_node_put(node); | 563 | of_node_put(node); |
577 | return ~(u32)0; | 564 | return ~(u32)0; |
578 | } | 565 | } |
@@ -580,10 +567,10 @@ found_cpu_node:
580 | return be32_to_cpup(prop); | 567 | return be32_to_cpup(prop); |
581 | } | 568 | } |
582 | 569 | ||
583 | prop = of_get_property(node, "next-level-cache", 0); | 570 | prop = of_get_property(node, "next-level-cache", NULL); |
584 | if (!prop) { | 571 | if (!prop) { |
585 | pr_debug("can't find next-level-cache at %s\n", | 572 | pr_debug("can't find next-level-cache at %s\n", |
586 | node->full_name); | 573 | node->full_name); |
587 | of_node_put(node); | 574 | of_node_put(node); |
588 | return ~(u32)0; /* can't traverse any further */ | 575 | return ~(u32)0; /* can't traverse any further */ |
589 | } | 576 | } |
@@ -598,7 +585,7 @@ found_cpu_node:
598 | } | 585 | } |
599 | 586 | ||
600 | pr_debug("stash dest not found for %d on vcpu %d\n", | 587 | pr_debug("stash dest not found for %d on vcpu %d\n", |
601 | stash_dest_hint, vcpu); | 588 | stash_dest_hint, vcpu); |
602 | return ~(u32)0; | 589 | return ~(u32)0; |
603 | } | 590 | } |
604 | 591 | ||
@@ -612,7 +599,7 @@ found_cpu_node:
612 | * Memory accesses to QMAN and BMAN private memory need not be coherent, so | 599 | * Memory accesses to QMAN and BMAN private memory need not be coherent, so |
613 | * clear the PAACE entry coherency attribute for them. | 600 | * clear the PAACE entry coherency attribute for them. |
614 | */ | 601 | */ |
615 | static void setup_qbman_paace(struct paace *ppaace, int paace_type) | 602 | static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) |
616 | { | 603 | { |
617 | switch (paace_type) { | 604 | switch (paace_type) { |
618 | case QMAN_PAACE: | 605 | case QMAN_PAACE: |
@@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
626 | case QMAN_PORTAL_PAACE: | 613 | case QMAN_PORTAL_PAACE: |
627 | set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); | 614 | set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); |
628 | ppaace->op_encode.index_ot.omi = OMI_QMAN; | 615 | ppaace->op_encode.index_ot.omi = OMI_QMAN; |
629 | /*Set DQRR and Frame stashing for the L3 cache */ | 616 | /* Set DQRR and Frame stashing for the L3 cache */ |
630 | set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0)); | 617 | set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0)); |
631 | break; | 618 | break; |
632 | case BMAN_PAACE: | 619 | case BMAN_PAACE: |
@@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt)
679 | * Get the maximum number of PAACT table entries | 666 | * Get the maximum number of PAACT table entries |
680 | * and subwindows supported by PAMU | 667 | * and subwindows supported by PAMU |
681 | */ | 668 | */ |
682 | static void get_pamu_cap_values(unsigned long pamu_reg_base) | 669 | static void __init get_pamu_cap_values(unsigned long pamu_reg_base) |
683 | { | 670 | { |
684 | u32 pc_val; | 671 | u32 pc_val; |
685 | 672 | ||
@@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
689 | } | 676 | } |
690 | 677 | ||
691 | /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ | 678 | /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ |
692 | int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, | 679 | static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, |
693 | phys_addr_t ppaact_phys, phys_addr_t spaact_phys, | 680 | phys_addr_t ppaact_phys, phys_addr_t spaact_phys, |
694 | phys_addr_t omt_phys) | 681 | phys_addr_t omt_phys) |
695 | { | 682 | { |
696 | u32 *pc; | 683 | u32 *pc; |
697 | struct pamu_mmap_regs *pamu_regs; | 684 | struct pamu_mmap_regs *pamu_regs; |
@@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
727 | */ | 714 | */ |
728 | 715 | ||
729 | out_be32((u32 *)(pamu_reg_base + PAMU_PICS), | 716 | out_be32((u32 *)(pamu_reg_base + PAMU_PICS), |
730 | PAMU_ACCESS_VIOLATION_ENABLE); | 717 | PAMU_ACCESS_VIOLATION_ENABLE); |
731 | out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC); | 718 | out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC); |
732 | return 0; | 719 | return 0; |
733 | } | 720 | } |
@@ -757,9 +744,9 @@ static void __init setup_liodns(void)
757 | ppaace->wbah = 0; | 744 | ppaace->wbah = 0; |
758 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); | 745 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); |
759 | set_bf(ppaace->impl_attr, PAACE_IA_ATM, | 746 | set_bf(ppaace->impl_attr, PAACE_IA_ATM, |
760 | PAACE_ATM_NO_XLATE); | 747 | PAACE_ATM_NO_XLATE); |
761 | set_bf(ppaace->addr_bitfields, PAACE_AF_AP, | 748 | set_bf(ppaace->addr_bitfields, PAACE_AF_AP, |
762 | PAACE_AP_PERMS_ALL); | 749 | PAACE_AP_PERMS_ALL); |
763 | if (of_device_is_compatible(node, "fsl,qman-portal")) | 750 | if (of_device_is_compatible(node, "fsl,qman-portal")) |
764 | setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE); | 751 | setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE); |
765 | if (of_device_is_compatible(node, "fsl,qman")) | 752 | if (of_device_is_compatible(node, "fsl,qman")) |
@@ -772,7 +759,7 @@ static void __init setup_liodns(void)
772 | } | 759 | } |
773 | } | 760 | } |
774 | 761 | ||
775 | irqreturn_t pamu_av_isr(int irq, void *arg) | 762 | static irqreturn_t pamu_av_isr(int irq, void *arg) |
776 | { | 763 | { |
777 | struct pamu_isr_data *data = arg; | 764 | struct pamu_isr_data *data = arg; |
778 | phys_addr_t phys; | 765 | phys_addr_t phys; |
@@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
792 | pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2)); | 779 | pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2)); |
793 | pr_emerg("AVS1=%08x\n", avs1); | 780 | pr_emerg("AVS1=%08x\n", avs1); |
794 | pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2)); | 781 | pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2)); |
795 | pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH), | 782 | pr_emerg("AVA=%016llx\n", |
796 | in_be32(p + PAMU_AVAL))); | 783 | make64(in_be32(p + PAMU_AVAH), |
784 | in_be32(p + PAMU_AVAL))); | ||
797 | pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD)); | 785 | pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD)); |
798 | pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH), | 786 | pr_emerg("POEA=%016llx\n", |
799 | in_be32(p + PAMU_POEAL))); | 787 | make64(in_be32(p + PAMU_POEAH), |
788 | in_be32(p + PAMU_POEAL))); | ||
800 | 789 | ||
801 | phys = make64(in_be32(p + PAMU_POEAH), | 790 | phys = make64(in_be32(p + PAMU_POEAH), |
802 | in_be32(p + PAMU_POEAL)); | 791 | in_be32(p + PAMU_POEAL)); |
803 | 792 | ||
804 | /* Assume that POEA points to a PAACE */ | 793 | /* Assume that POEA points to a PAACE */ |
805 | if (phys) { | 794 | if (phys) { |
@@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
807 | 796 | ||
808 | /* Only the first four words are relevant */ | 797 | /* Only the first four words are relevant */ |
809 | for (j = 0; j < 4; j++) | 798 | for (j = 0; j < 4; j++) |
810 | pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j)); | 799 | pr_emerg("PAACE[%u]=%08x\n", |
800 | j, in_be32(paace + j)); | ||
811 | } | 801 | } |
812 | 802 | ||
813 | /* clear access violation condition */ | 803 | /* clear access violation condition */ |
814 | out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK); | 804 | out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK); |
815 | paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT); | 805 | paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT); |
816 | BUG_ON(!paace); | 806 | BUG_ON(!paace); |
817 | /* check if we got a violation for a disabled LIODN */ | 807 | /* check if we got a violation for a disabled LIODN */ |
@@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
827 | /* Disable the LIODN */ | 817 | /* Disable the LIODN */ |
828 | ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT); | 818 | ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT); |
829 | BUG_ON(ret); | 819 | BUG_ON(ret); |
830 | pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT); | 820 | pr_emerg("Disabling liodn %x\n", |
821 | avs1 >> PAMU_AVS1_LIODN_SHIFT); | ||
831 | } | 822 | } |
832 | out_be32((p + PAMU_PICS), pics); | 823 | out_be32((p + PAMU_PICS), pics); |
833 | } | 824 | } |
834 | } | 825 | } |
835 | 826 | ||
836 | |||
837 | return IRQ_HANDLED; | 827 | return IRQ_HANDLED; |
838 | } | 828 | } |
839 | 829 | ||
@@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
952 | } | 942 | } |
953 | 943 | ||
954 | if (i == 0 || i == num_laws) { | 944 | if (i == 0 || i == num_laws) { |
955 | /* This should never happen*/ | 945 | /* This should never happen */ |
956 | ret = -ENOENT; | 946 | ret = -ENOENT; |
957 | goto error; | 947 | goto error; |
958 | } | 948 | } |
@@ -998,26 +988,27 @@ error:
998 | static const struct { | 988 | static const struct { |
999 | u32 svr; | 989 | u32 svr; |
1000 | u32 port_id; | 990 | u32 port_id; |
1001 | } port_id_map[] = { | 991 | } port_id_map[] __initconst = { |
1002 | {0x82100010, 0xFF000000}, /* P2040 1.0 */ | 992 | {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */ |
1003 | {0x82100011, 0xFF000000}, /* P2040 1.1 */ | 993 | {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */ |
1004 | {0x82100110, 0xFF000000}, /* P2041 1.0 */ | 994 | {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */ |
1005 | {0x82100111, 0xFF000000}, /* P2041 1.1 */ | 995 | {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */ |
1006 | {0x82110310, 0xFF000000}, /* P3041 1.0 */ | 996 | {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */ |
1007 | {0x82110311, 0xFF000000}, /* P3041 1.1 */ | 997 | {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */ |
1008 | {0x82010020, 0xFFF80000}, /* P4040 2.0 */ | 998 | {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */ |
1009 | {0x82000020, 0xFFF80000}, /* P4080 2.0 */ | 999 | {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */ |
1010 | {0x82210010, 0xFC000000}, /* P5010 1.0 */ | 1000 | {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */ |
1011 | {0x82210020, 0xFC000000}, /* P5010 2.0 */ | 1001 | {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */ |
1012 | {0x82200010, 0xFC000000}, /* P5020 1.0 */ | 1002 | {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */ |
1013 | {0x82050010, 0xFF800000}, /* P5021 1.0 */ | 1003 | {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */ |
1014 | {0x82040010, 0xFF800000}, /* P5040 1.0 */ | 1004 | {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */ |
1015 | }; | 1005 | }; |
1016 | 1006 | ||
1017 | #define SVR_SECURITY 0x80000 /* The Security (E) bit */ | 1007 | #define SVR_SECURITY 0x80000 /* The Security (E) bit */ |
1018 | 1008 | ||
1019 | static int __init fsl_pamu_probe(struct platform_device *pdev) | 1009 | static int __init fsl_pamu_probe(struct platform_device *pdev) |
1020 | { | 1010 | { |
1011 | struct device *dev = &pdev->dev; | ||
1021 | void __iomem *pamu_regs = NULL; | 1012 | void __iomem *pamu_regs = NULL; |
1022 | struct ccsr_guts __iomem *guts_regs = NULL; | 1013 | struct ccsr_guts __iomem *guts_regs = NULL; |
1023 | u32 pamubypenr, pamu_counter; | 1014 | u32 pamubypenr, pamu_counter; |
@@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1042 | * NOTE : All PAMUs share the same LIODN tables. | 1033 | * NOTE : All PAMUs share the same LIODN tables. |
1043 | */ | 1034 | */ |
1044 | 1035 | ||
1045 | pamu_regs = of_iomap(pdev->dev.of_node, 0); | 1036 | pamu_regs = of_iomap(dev->of_node, 0); |
1046 | if (!pamu_regs) { | 1037 | if (!pamu_regs) { |
1047 | dev_err(&pdev->dev, "ioremap of PAMU node failed\n"); | 1038 | dev_err(dev, "ioremap of PAMU node failed\n"); |
1048 | return -ENOMEM; | 1039 | return -ENOMEM; |
1049 | } | 1040 | } |
1050 | of_get_address(pdev->dev.of_node, 0, &size, NULL); | 1041 | of_get_address(dev->of_node, 0, &size, NULL); |
1051 | 1042 | ||
1052 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 1043 | irq = irq_of_parse_and_map(dev->of_node, 0); |
1053 | if (irq == NO_IRQ) { | 1044 | if (irq == NO_IRQ) { |
1054 | dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n"); | 1045 | dev_warn(dev, "no interrupts listed in PAMU node\n"); |
1055 | goto error; | 1046 | goto error; |
1056 | } | 1047 | } |
1057 | 1048 | ||
1058 | data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL); | 1049 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1059 | if (!data) { | 1050 | if (!data) { |
1060 | dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n"); | ||
1061 | ret = -ENOMEM; | 1051 | ret = -ENOMEM; |
1062 | goto error; | 1052 | goto error; |
1063 | } | 1053 | } |
@@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1067 | /* The ISR needs access to the regs, so we won't iounmap them */ | 1057 | /* The ISR needs access to the regs, so we won't iounmap them */ |
1068 | ret = request_irq(irq, pamu_av_isr, 0, "pamu", data); | 1058 | ret = request_irq(irq, pamu_av_isr, 0, "pamu", data); |
1069 | if (ret < 0) { | 1059 | if (ret < 0) { |
1070 | dev_err(&pdev->dev, "error %i installing ISR for irq %i\n", | 1060 | dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq); |
1071 | ret, irq); | ||
1072 | goto error; | 1061 | goto error; |
1073 | } | 1062 | } |
1074 | 1063 | ||
1075 | guts_node = of_find_matching_node(NULL, guts_device_ids); | 1064 | guts_node = of_find_matching_node(NULL, guts_device_ids); |
1076 | if (!guts_node) { | 1065 | if (!guts_node) { |
1077 | dev_err(&pdev->dev, "could not find GUTS node %s\n", | 1066 | dev_err(dev, "could not find GUTS node %s\n", |
1078 | pdev->dev.of_node->full_name); | 1067 | dev->of_node->full_name); |
1079 | ret = -ENODEV; | 1068 | ret = -ENODEV; |
1080 | goto error; | 1069 | goto error; |
1081 | } | 1070 | } |
@@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1083 | guts_regs = of_iomap(guts_node, 0); | 1072 | guts_regs = of_iomap(guts_node, 0); |
1084 | of_node_put(guts_node); | 1073 | of_node_put(guts_node); |
1085 | if (!guts_regs) { | 1074 | if (!guts_regs) { |
1086 | dev_err(&pdev->dev, "ioremap of GUTS node failed\n"); | 1075 | dev_err(dev, "ioremap of GUTS node failed\n"); |
1087 | ret = -ENODEV; | 1076 | ret = -ENODEV; |
1088 | goto error; | 1077 | goto error; |
1089 | } | 1078 | } |
@@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1103 | 1092 | ||
1104 | p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); | 1093 | p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); |
1105 | if (!p) { | 1094 | if (!p) { |
1106 | dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n"); | 1095 | dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n"); |
1107 | ret = -ENOMEM; | 1096 | ret = -ENOMEM; |
1108 | goto error; | 1097 | goto error; |
1109 | } | 1098 | } |
@@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1113 | 1102 | ||
1114 | /* Make sure the memory is naturally aligned */ | 1103 | /* Make sure the memory is naturally aligned */ |
1115 | if (ppaact_phys & ((PAGE_SIZE << order) - 1)) { | 1104 | if (ppaact_phys & ((PAGE_SIZE << order) - 1)) { |
1116 | dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n"); | 1105 | dev_err(dev, "PAACT/OMT block is unaligned\n"); |
1117 | ret = -ENOMEM; | 1106 | ret = -ENOMEM; |
1118 | goto error; | 1107 | goto error; |
1119 | } | 1108 | } |
@@ -1121,7 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1121 | spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE)); | 1110 | spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE)); |
1122 | omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE)); | 1111 | omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE)); |
1123 | 1112 | ||
1124 | dev_dbg(&pdev->dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys); | 1113 | dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys); |
1125 | 1114 | ||
1126 | /* Check to see if we need to implement the work-around on this SOC */ | 1115 | /* Check to see if we need to implement the work-around on this SOC */ |
1127 | 1116 | ||
@@ -1129,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1129 | for (i = 0; i < ARRAY_SIZE(port_id_map); i++) { | 1118 | for (i = 0; i < ARRAY_SIZE(port_id_map); i++) { |
1130 | if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) { | 1119 | if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) { |
1131 | csd_port_id = port_id_map[i].port_id; | 1120 | csd_port_id = port_id_map[i].port_id; |
1132 | dev_dbg(&pdev->dev, "found matching SVR %08x\n", | 1121 | dev_dbg(dev, "found matching SVR %08x\n", |
1133 | port_id_map[i].svr); | 1122 | port_id_map[i].svr); |
1134 | break; | 1123 | break; |
1135 | } | 1124 | } |
1136 | } | 1125 | } |
1137 | 1126 | ||
1138 | if (csd_port_id) { | 1127 | if (csd_port_id) { |
1139 | dev_dbg(&pdev->dev, "creating coherency subdomain at address " | 1128 | dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x", |
1140 | "%pa, size %zu, port id 0x%08x", &ppaact_phys, | 1129 | &ppaact_phys, mem_size, csd_port_id); |
1141 | mem_size, csd_port_id); | ||
1142 | 1130 | ||
1143 | ret = create_csd(ppaact_phys, mem_size, csd_port_id); | 1131 | ret = create_csd(ppaact_phys, mem_size, csd_port_id); |
1144 | if (ret) { | 1132 | if (ret) { |
1145 | dev_err(&pdev->dev, "could not create coherence " | 1133 | dev_err(dev, "could not create coherence subdomain\n"); |
1146 | "subdomain\n"); | ||
1147 | return ret; | 1134 | return ret; |
1148 | } | 1135 | } |
1149 | } | 1136 | } |
@@ -1154,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1154 | spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); | 1141 | spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); |
1155 | if (!spaace_pool) { | 1142 | if (!spaace_pool) { |
1156 | ret = -ENOMEM; | 1143 | ret = -ENOMEM; |
1157 | dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n"); | 1144 | dev_err(dev, "Failed to allocate spaace gen pool\n"); |
1158 | goto error; | 1145 | goto error; |
1159 | } | 1146 | } |
1160 | 1147 | ||
@@ -1167,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1167 | for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; | 1154 | for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; |
1168 | pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) { | 1155 | pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) { |
1169 | 1156 | ||
1170 | pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off; | 1157 | pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off; |
1171 | setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys, | 1158 | setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys, |
1172 | spaact_phys, omt_phys); | 1159 | spaact_phys, omt_phys); |
1173 | /* Disable PAMU bypass for this PAMU */ | 1160 | /* Disable PAMU bypass for this PAMU */ |
1174 | pamubypenr &= ~pamu_counter; | 1161 | pamubypenr &= ~pamu_counter; |
1175 | } | 1162 | } |
@@ -1181,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1181 | 1168 | ||
1182 | iounmap(guts_regs); | 1169 | iounmap(guts_regs); |
1183 | 1170 | ||
1184 | /* Enable DMA for the LIODNs in the device tree*/ | 1171 | /* Enable DMA for the LIODNs in the device tree */ |
1185 | 1172 | ||
1186 | setup_liodns(); | 1173 | setup_liodns(); |
1187 | 1174 | ||
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index 8fc1a125b16e..aab723f91f12 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -19,13 +19,15 @@
19 | #ifndef __FSL_PAMU_H | 19 | #ifndef __FSL_PAMU_H |
20 | #define __FSL_PAMU_H | 20 | #define __FSL_PAMU_H |
21 | 21 | ||
22 | #include <linux/iommu.h> | ||
23 | |||
22 | #include <asm/fsl_pamu_stash.h> | 24 | #include <asm/fsl_pamu_stash.h> |
23 | 25 | ||
24 | /* Bit Field macros | 26 | /* Bit Field macros |
25 | * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load | 27 | * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load |
26 | */ | 28 | */ |
27 | #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m))) | 29 | #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m))) |
28 | #define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT)) | 30 | #define get_bf(v, m) (((v) & (m)) >> m##_SHIFT) |
29 | 31 | ||
30 | /* PAMU CCSR space */ | 32 | /* PAMU CCSR space */ |
31 | #define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */ | 33 | #define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */ |
@@ -65,7 +67,7 @@ struct pamu_mmap_regs {
65 | #define PAMU_AVS1_GCV 0x2000 | 67 | #define PAMU_AVS1_GCV 0x2000 |
66 | #define PAMU_AVS1_PDV 0x4000 | 68 | #define PAMU_AVS1_PDV 0x4000 |
67 | #define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \ | 69 | #define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \ |
68 | | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV) | 70 | | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV) |
69 | #define PAMU_AVS1_LIODN_SHIFT 16 | 71 | #define PAMU_AVS1_LIODN_SHIFT 16 |
70 | #define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400 | 72 | #define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400 |
71 | 73 | ||
@@ -198,8 +200,7 @@ struct pamu_mmap_regs {
198 | #define PAACE_ATM_NO_XLATE 0x00 | 200 | #define PAACE_ATM_NO_XLATE 0x00 |
199 | #define PAACE_ATM_WINDOW_XLATE 0x01 | 201 | #define PAACE_ATM_WINDOW_XLATE 0x01 |
200 | #define PAACE_ATM_PAGE_XLATE 0x02 | 202 | #define PAACE_ATM_PAGE_XLATE 0x02 |
201 | #define PAACE_ATM_WIN_PG_XLATE \ | 203 | #define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE) |
202 | (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE) | ||
203 | #define PAACE_OTM_NO_XLATE 0x00 | 204 | #define PAACE_OTM_NO_XLATE 0x00 |
204 | #define PAACE_OTM_IMMEDIATE 0x01 | 205 | #define PAACE_OTM_IMMEDIATE 0x01 |
205 | #define PAACE_OTM_INDEXED 0x02 | 206 | #define PAACE_OTM_INDEXED 0x02 |
@@ -219,7 +220,7 @@ struct pamu_mmap_regs {
219 | #define PAACE_TCEF_FORMAT0_8B 0x00 | 220 | #define PAACE_TCEF_FORMAT0_8B 0x00 |
220 | #define PAACE_TCEF_FORMAT1_RSVD 0x01 | 221 | #define PAACE_TCEF_FORMAT1_RSVD 0x01 |
221 | /* | 222 | /* |
222 | * Hard coded value for the PAACT size to accomodate | 223 | * Hard coded value for the PAACT size to accommodate |
223 | * maximum LIODN value generated by u-boot. | 224 | * maximum LIODN value generated by u-boot. |
224 | */ | 225 | */ |
225 | #define PAACE_NUMBER_ENTRIES 0x500 | 226 | #define PAACE_NUMBER_ENTRIES 0x500 |
@@ -332,7 +333,7 @@ struct paace {
332 | #define NUM_MOE 128 | 333 | #define NUM_MOE 128 |
333 | struct ome { | 334 | struct ome { |
334 | u8 moe[NUM_MOE]; | 335 | u8 moe[NUM_MOE]; |
335 | } __attribute__((packed)); | 336 | } __packed; |
336 | 337 | ||
337 | #define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES) | 338 | #define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES) |
338 | #define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES) | 339 | #define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES) |
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c828f80d48b0..ceebd287b660 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -19,26 +19,10 @@
19 | 19 | ||
20 | #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ | 20 | #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ |
21 | 21 | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/iommu.h> | ||
24 | #include <linux/notifier.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <linux/of_platform.h> | ||
32 | #include <linux/bootmem.h> | ||
33 | #include <linux/err.h> | ||
34 | #include <asm/io.h> | ||
35 | #include <asm/bitops.h> | ||
36 | |||
37 | #include <asm/pci-bridge.h> | ||
38 | #include <sysdev/fsl_pci.h> | ||
39 | |||
40 | #include "fsl_pamu_domain.h" | 22 | #include "fsl_pamu_domain.h" |
41 | 23 | ||
24 | #include <sysdev/fsl_pci.h> | ||
25 | |||
42 | /* | 26 | /* |
43 | * Global spinlock that needs to be held while | 27 | * Global spinlock that needs to be held while |
44 | * configuring PAMU. | 28 | * configuring PAMU. |
@@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock);
51 | 35 | ||
52 | static int __init iommu_init_mempool(void) | 36 | static int __init iommu_init_mempool(void) |
53 | { | 37 | { |
54 | |||
55 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", | 38 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", |
56 | sizeof(struct fsl_dma_domain), | 39 | sizeof(struct fsl_dma_domain), |
57 | 0, | 40 | 0, |
58 | SLAB_HWCACHE_ALIGN, | 41 | SLAB_HWCACHE_ALIGN, |
59 | 42 | NULL); | |
60 | NULL); | ||
61 | if (!fsl_pamu_domain_cache) { | 43 | if (!fsl_pamu_domain_cache) { |
62 | pr_debug("Couldn't create fsl iommu_domain cache\n"); | 44 | pr_debug("Couldn't create fsl iommu_domain cache\n"); |
63 | return -ENOMEM; | 45 | return -ENOMEM; |
64 | } | 46 | } |
65 | 47 | ||
66 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", | 48 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", |
67 | sizeof(struct device_domain_info), | 49 | sizeof(struct device_domain_info), |
68 | 0, | 50 | 0, |
69 | SLAB_HWCACHE_ALIGN, | 51 | SLAB_HWCACHE_ALIGN, |
70 | NULL); | 52 | NULL); |
71 | if (!iommu_devinfo_cache) { | 53 | if (!iommu_devinfo_cache) { |
72 | pr_debug("Couldn't create devinfo cache\n"); | 54 | pr_debug("Couldn't create devinfo cache\n"); |
73 | kmem_cache_destroy(fsl_pamu_domain_cache); | 55 | kmem_cache_destroy(fsl_pamu_domain_cache); |
@@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void)
80 | static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) | 62 | static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) |
81 | { | 63 | { |
82 | u32 win_cnt = dma_domain->win_cnt; | 64 | u32 win_cnt = dma_domain->win_cnt; |
83 | struct dma_window *win_ptr = | 65 | struct dma_window *win_ptr = &dma_domain->win_arr[0]; |
84 | &dma_domain->win_arr[0]; | ||
85 | struct iommu_domain_geometry *geom; | 66 | struct iommu_domain_geometry *geom; |
86 | 67 | ||
87 | geom = &dma_domain->iommu_domain->geometry; | 68 | geom = &dma_domain->iommu_domain->geometry; |
@@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
103 | } | 84 | } |
104 | 85 | ||
105 | if (win_ptr->valid) | 86 | if (win_ptr->valid) |
106 | return (win_ptr->paddr + (iova & (win_ptr->size - 1))); | 87 | return win_ptr->paddr + (iova & (win_ptr->size - 1)); |
107 | 88 | ||
108 | return 0; | 89 | return 0; |
109 | } | 90 | } |
110 | 91 | ||
111 | static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) | 92 | static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) |
112 | { | 93 | { |
113 | struct dma_window *sub_win_ptr = | 94 | struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; |
114 | &dma_domain->win_arr[0]; | ||
115 | int i, ret; | 95 | int i, ret; |
116 | unsigned long rpn, flags; | 96 | unsigned long rpn, flags; |
117 | 97 | ||
118 | for (i = 0; i < dma_domain->win_cnt; i++) { | 98 | for (i = 0; i < dma_domain->win_cnt; i++) { |
119 | if (sub_win_ptr[i].valid) { | 99 | if (sub_win_ptr[i].valid) { |
120 | rpn = sub_win_ptr[i].paddr >> | 100 | rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT; |
121 | PAMU_PAGE_SHIFT; | ||
122 | spin_lock_irqsave(&iommu_lock, flags); | 101 | spin_lock_irqsave(&iommu_lock, flags); |
123 | ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, | 102 | ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, |
124 | sub_win_ptr[i].size, | 103 | sub_win_ptr[i].size, |
@@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
130 | sub_win_ptr[i].prot); | 109 | sub_win_ptr[i].prot); |
131 | spin_unlock_irqrestore(&iommu_lock, flags); | 110 | spin_unlock_irqrestore(&iommu_lock, flags); |
132 | if (ret) { | 111 | if (ret) { |
133 | pr_debug("PAMU SPAACE configuration failed for liodn %d\n", | 112 | pr_debug("SPAACE configuration failed for liodn %d\n", |
134 | liodn); | 113 | liodn); |
135 | return ret; | 114 | return ret; |
136 | } | 115 | } |
@@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
156 | 0, wnd->prot); | 135 | 0, wnd->prot); |
157 | spin_unlock_irqrestore(&iommu_lock, flags); | 136 | spin_unlock_irqrestore(&iommu_lock, flags); |
158 | if (ret) | 137 | if (ret) |
159 | pr_debug("PAMU PAACE configuration failed for liodn %d\n", | 138 | pr_debug("PAACE configuration failed for liodn %d\n", liodn); |
160 | liodn); | ||
161 | 139 | ||
162 | return ret; | 140 | return ret; |
163 | } | 141 | } |
@@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
169 | return map_subwins(liodn, dma_domain); | 147 | return map_subwins(liodn, dma_domain); |
170 | else | 148 | else |
171 | return map_win(liodn, dma_domain); | 149 | return map_win(liodn, dma_domain); |
172 | |||
173 | } | 150 | } |
174 | 151 | ||
175 | /* Update window/subwindow mapping for the LIODN */ | 152 | /* Update window/subwindow mapping for the LIODN */ |
@@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
190 | (wnd_nr > 0) ? 1 : 0, | 167 | (wnd_nr > 0) ? 1 : 0, |
191 | wnd->prot); | 168 | wnd->prot); |
192 | if (ret) | 169 | if (ret) |
193 | pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn); | 170 | pr_debug("Subwindow reconfiguration failed for liodn %d\n", |
171 | liodn); | ||
194 | } else { | 172 | } else { |
195 | phys_addr_t wnd_addr; | 173 | phys_addr_t wnd_addr; |
196 | 174 | ||
@@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
200 | wnd->size, | 178 | wnd->size, |
201 | ~(u32)0, | 179 | ~(u32)0, |
202 | wnd->paddr >> PAMU_PAGE_SHIFT, | 180 | wnd->paddr >> PAMU_PAGE_SHIFT, |
203 | dma_domain->snoop_id, dma_domain->stash_id, | 181 | dma_domain->snoop_id, dma_domain->stash_id, |
204 | 0, wnd->prot); | 182 | 0, wnd->prot); |
205 | if (ret) | 183 | if (ret) |
206 | pr_debug("Window reconfiguration failed for liodn %d\n", liodn); | 184 | pr_debug("Window reconfiguration failed for liodn %d\n", |
185 | liodn); | ||
207 | } | 186 | } |
208 | 187 | ||
209 | spin_unlock_irqrestore(&iommu_lock, flags); | 188 | spin_unlock_irqrestore(&iommu_lock, flags); |
@@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
212 | } | 191 | } |
213 | 192 | ||
214 | static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, | 193 | static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, |
215 | u32 val) | 194 | u32 val) |
216 | { | 195 | { |
217 | int ret = 0, i; | 196 | int ret = 0, i; |
218 | unsigned long flags; | 197 | unsigned long flags; |
219 | 198 | ||
220 | spin_lock_irqsave(&iommu_lock, flags); | 199 | spin_lock_irqsave(&iommu_lock, flags); |
221 | if (!dma_domain->win_arr) { | 200 | if (!dma_domain->win_arr) { |
222 | pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn); | 201 | pr_debug("Windows not configured, stash destination update failed for liodn %d\n", |
202 | liodn); | ||
223 | spin_unlock_irqrestore(&iommu_lock, flags); | 203 | spin_unlock_irqrestore(&iommu_lock, flags); |
224 | return -EINVAL; | 204 | return -EINVAL; |
225 | } | 205 | } |
@@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
227 | for (i = 0; i < dma_domain->win_cnt; i++) { | 207 | for (i = 0; i < dma_domain->win_cnt; i++) { |
228 | ret = pamu_update_paace_stash(liodn, i, val); | 208 | ret = pamu_update_paace_stash(liodn, i, val); |
229 | if (ret) { | 209 | if (ret) { |
230 | pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn); | 210 | pr_debug("Failed to update SPAACE %d field for liodn %d\n ", |
211 | i, liodn); | ||
231 | spin_unlock_irqrestore(&iommu_lock, flags); | 212 | spin_unlock_irqrestore(&iommu_lock, flags); |
232 | return ret; | 213 | return ret; |
233 | } | 214 | } |
@@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
240 | 221 | ||
241 | /* Set the geometry parameters for a LIODN */ | 222 | /* Set the geometry parameters for a LIODN */ |
242 | static int pamu_set_liodn(int liodn, struct device *dev, | 223 | static int pamu_set_liodn(int liodn, struct device *dev, |
243 | struct fsl_dma_domain *dma_domain, | 224 | struct fsl_dma_domain *dma_domain, |
244 | struct iommu_domain_geometry *geom_attr, | 225 | struct iommu_domain_geometry *geom_attr, |
245 | u32 win_cnt) | 226 | u32 win_cnt) |
246 | { | 227 | { |
247 | phys_addr_t window_addr, window_size; | 228 | phys_addr_t window_addr, window_size; |
248 | phys_addr_t subwin_size; | 229 | phys_addr_t subwin_size; |
@@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
268 | dma_domain->stash_id, win_cnt, 0); | 249 | dma_domain->stash_id, win_cnt, 0); |
269 | spin_unlock_irqrestore(&iommu_lock, flags); | 250 | spin_unlock_irqrestore(&iommu_lock, flags); |
270 | if (ret) { | 251 | if (ret) { |
271 | pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt); | 252 | pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n", |
253 | liodn, win_cnt); | ||
272 | return ret; | 254 | return ret; |
273 | } | 255 | } |
274 | 256 | ||
@@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
285 | 0, 0); | 267 | 0, 0); |
286 | spin_unlock_irqrestore(&iommu_lock, flags); | 268 | spin_unlock_irqrestore(&iommu_lock, flags); |
287 | if (ret) { | 269 | if (ret) { |
288 | pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn); | 270 | pr_debug("SPAACE configuration failed for liodn %d\n", |
271 | liodn); | ||
289 | return ret; | 272 | return ret; |
290 | } | 273 | } |
291 | } | 274 | } |
@@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova)
301 | * to PAMU page size. | 284 | * to PAMU page size. |
302 | */ | 285 | */ |
303 | if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { | 286 | if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { |
304 | pr_debug("%s: size too small or not a power of two\n", __func__); | 287 | pr_debug("Size too small or not a power of two\n"); |
305 | return -EINVAL; | 288 | return -EINVAL; |
306 | } | 289 | } |
307 | 290 | ||
308 | /* iova must be page size aligned*/ | 291 | /* iova must be page size aligned */ |
309 | if (iova & (size - 1)) { | 292 | if (iova & (size - 1)) { |
310 | pr_debug("%s: address is not aligned with window size\n", __func__); | 293 | pr_debug("Address is not aligned with window size\n"); |
311 | return -EINVAL; | 294 | return -EINVAL; |
312 | } | 295 | } |
313 | 296 | ||
@@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
396 | if (!dev->archdata.iommu_domain) | 379 | if (!dev->archdata.iommu_domain) |
397 | dev->archdata.iommu_domain = info; | 380 | dev->archdata.iommu_domain = info; |
398 | spin_unlock_irqrestore(&device_domain_lock, flags); | 381 | spin_unlock_irqrestore(&device_domain_lock, flags); |
399 | |||
400 | } | 382 | } |
401 | 383 | ||
402 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, | 384 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, |
403 | dma_addr_t iova) | 385 | dma_addr_t iova) |
404 | { | 386 | { |
405 | struct fsl_dma_domain *dma_domain = domain->priv; | 387 | struct fsl_dma_domain *dma_domain = domain->priv; |
406 | 388 | ||
407 | if ((iova < domain->geometry.aperture_start) || | 389 | if (iova < domain->geometry.aperture_start || |
408 | iova > (domain->geometry.aperture_end)) | 390 | iova > domain->geometry.aperture_end) |
409 | return 0; | 391 | return 0; |
410 | 392 | ||
411 | return get_phys_addr(dma_domain, iova); | 393 | return get_phys_addr(dma_domain, iova); |
@@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
460 | 442 | ||
461 | list_for_each_entry(info, &dma_domain->devices, link) { | 443 | list_for_each_entry(info, &dma_domain->devices, link) { |
462 | ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, | 444 | ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, |
463 | geom_attr, win_cnt); | 445 | geom_attr, win_cnt); |
464 | if (ret) | 446 | if (ret) |
465 | break; | 447 | break; |
466 | } | 448 | } |
@@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
543 | } | 525 | } |
544 | 526 | ||
545 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | 527 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
546 | |||
547 | } | 528 | } |
548 | 529 | ||
549 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, | 530 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, |
@@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
576 | 557 | ||
577 | win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); | 558 | win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); |
578 | if (size > win_size) { | 559 | if (size > win_size) { |
579 | pr_debug("Invalid window size \n"); | 560 | pr_debug("Invalid window size\n"); |
580 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | 561 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
581 | return -EINVAL; | 562 | return -EINVAL; |
582 | } | 563 | } |
@@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
622 | * and window mappings. | 603 | * and window mappings. |
623 | */ | 604 | */ |
624 | static int handle_attach_device(struct fsl_dma_domain *dma_domain, | 605 | static int handle_attach_device(struct fsl_dma_domain *dma_domain, |
625 | struct device *dev, const u32 *liodn, | 606 | struct device *dev, const u32 *liodn, |
626 | int num) | 607 | int num) |
627 | { | 608 | { |
628 | unsigned long flags; | 609 | unsigned long flags; |
629 | struct iommu_domain *domain = dma_domain->iommu_domain; | 610 | struct iommu_domain *domain = dma_domain->iommu_domain; |
@@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
632 | 613 | ||
633 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | 614 | spin_lock_irqsave(&dma_domain->domain_lock, flags); |
634 | for (i = 0; i < num; i++) { | 615 | for (i = 0; i < num; i++) { |
635 | |||
636 | /* Ensure that LIODN value is valid */ | 616 | /* Ensure that LIODN value is valid */ |
637 | if (liodn[i] >= PAACE_NUMBER_ENTRIES) { | 617 | if (liodn[i] >= PAACE_NUMBER_ENTRIES) { |
638 | pr_debug("Invalid liodn %d, attach device failed for %s\n", | 618 | pr_debug("Invalid liodn %d, attach device failed for %s\n", |
639 | liodn[i], dev->of_node->full_name); | 619 | liodn[i], dev->of_node->full_name); |
640 | ret = -EINVAL; | 620 | ret = -EINVAL; |
641 | break; | 621 | break; |
642 | } | 622 | } |
@@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
649 | */ | 629 | */ |
650 | if (dma_domain->win_arr) { | 630 | if (dma_domain->win_arr) { |
651 | u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; | 631 | u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; |
632 | |||
652 | ret = pamu_set_liodn(liodn[i], dev, dma_domain, | 633 | ret = pamu_set_liodn(liodn[i], dev, dma_domain, |
653 | &domain->geometry, | 634 | &domain->geometry, win_cnt); |
654 | win_cnt); | ||
655 | if (ret) | 635 | if (ret) |
656 | break; | 636 | break; |
657 | if (dma_domain->mapped) { | 637 | if (dma_domain->mapped) { |
@@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
698 | liodn = of_get_property(dev->of_node, "fsl,liodn", &len); | 678 | liodn = of_get_property(dev->of_node, "fsl,liodn", &len); |
699 | if (liodn) { | 679 | if (liodn) { |
700 | liodn_cnt = len / sizeof(u32); | 680 | liodn_cnt = len / sizeof(u32); |
701 | ret = handle_attach_device(dma_domain, dev, | 681 | ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); |
702 | liodn, liodn_cnt); | ||
703 | } else { | 682 | } else { |
704 | pr_debug("missing fsl,liodn property at %s\n", | 683 | pr_debug("missing fsl,liodn property at %s\n", |
705 | dev->of_node->full_name); | 684 | dev->of_node->full_name); |
706 | ret = -EINVAL; | 685 | ret = -EINVAL; |
707 | } | 686 | } |
708 | 687 | ||
709 | return ret; | 688 | return ret; |
710 | } | 689 | } |
711 | 690 | ||
712 | static void fsl_pamu_detach_device(struct iommu_domain *domain, | 691 | static void fsl_pamu_detach_device(struct iommu_domain *domain, |
713 | struct device *dev) | 692 | struct device *dev) |
714 | { | 693 | { |
715 | struct fsl_dma_domain *dma_domain = domain->priv; | 694 | struct fsl_dma_domain *dma_domain = domain->priv; |
716 | const u32 *prop; | 695 | const u32 *prop; |
@@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
738 | detach_device(dev, dma_domain); | 717 | detach_device(dev, dma_domain); |
739 | else | 718 | else |
740 | pr_debug("missing fsl,liodn property at %s\n", | 719 | pr_debug("missing fsl,liodn property at %s\n", |
741 | dev->of_node->full_name); | 720 | dev->of_node->full_name); |
742 | } | 721 | } |
743 | 722 | ||
744 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) | 723 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) |
@@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data) | |||
754 | * DMA outside of the geometry. | 733 | * DMA outside of the geometry. |
755 | */ | 734 | */ |
756 | if (check_size(geom_size, geom_attr->aperture_start) || | 735 | if (check_size(geom_size, geom_attr->aperture_start) || |
757 | !geom_attr->force_aperture) { | 736 | !geom_attr->force_aperture) { |
758 | pr_debug("Invalid PAMU geometry attributes\n"); | 737 | pr_debug("Invalid PAMU geometry attributes\n"); |
759 | return -EINVAL; | 738 | return -EINVAL; |
760 | } | 739 | } |
761 | 740 | ||
762 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | 741 | spin_lock_irqsave(&dma_domain->domain_lock, flags); |
763 | if (dma_domain->enabled) { | 742 | if (dma_domain->enabled) { |
@@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
786 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | 765 | spin_lock_irqsave(&dma_domain->domain_lock, flags); |
787 | 766 | ||
788 | memcpy(&dma_domain->dma_stash, stash_attr, | 767 | memcpy(&dma_domain->dma_stash, stash_attr, |
789 | sizeof(struct pamu_stash_attribute)); | 768 | sizeof(struct pamu_stash_attribute)); |
790 | 769 | ||
791 | dma_domain->stash_id = get_stash_id(stash_attr->cache, | 770 | dma_domain->stash_id = get_stash_id(stash_attr->cache, |
792 | stash_attr->cpu); | 771 | stash_attr->cpu); |
@@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
803 | return ret; | 782 | return ret; |
804 | } | 783 | } |
805 | 784 | ||
806 | /* Configure domain dma state i.e. enable/disable DMA*/ | 785 | /* Configure domain dma state i.e. enable/disable DMA */ |
807 | static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) | 786 | static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) |
808 | { | 787 | { |
809 | struct device_domain_info *info; | 788 | struct device_domain_info *info; |
@@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
819 | } | 798 | } |
820 | 799 | ||
821 | dma_domain->enabled = enable; | 800 | dma_domain->enabled = enable; |
822 | list_for_each_entry(info, &dma_domain->devices, | 801 | list_for_each_entry(info, &dma_domain->devices, link) { |
823 | link) { | ||
824 | ret = (enable) ? pamu_enable_liodn(info->liodn) : | 802 | ret = (enable) ? pamu_enable_liodn(info->liodn) : |
825 | pamu_disable_liodn(info->liodn); | 803 | pamu_disable_liodn(info->liodn); |
826 | if (ret) | 804 | if (ret) |
@@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
833 | } | 811 | } |
834 | 812 | ||
835 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, | 813 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, |
836 | enum iommu_attr attr_type, void *data) | 814 | enum iommu_attr attr_type, void *data) |
837 | { | 815 | { |
838 | struct fsl_dma_domain *dma_domain = domain->priv; | 816 | struct fsl_dma_domain *dma_domain = domain->priv; |
839 | int ret = 0; | 817 | int ret = 0; |
840 | 818 | ||
841 | |||
842 | switch (attr_type) { | 819 | switch (attr_type) { |
843 | case DOMAIN_ATTR_GEOMETRY: | 820 | case DOMAIN_ATTR_GEOMETRY: |
844 | ret = configure_domain_geometry(domain, data); | 821 | ret = configure_domain_geometry(domain, data); |
@@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
853 | pr_debug("Unsupported attribute type\n"); | 830 | pr_debug("Unsupported attribute type\n"); |
854 | ret = -EINVAL; | 831 | ret = -EINVAL; |
855 | break; | 832 | break; |
856 | }; | 833 | } |
857 | 834 | ||
858 | return ret; | 835 | return ret; |
859 | } | 836 | } |
860 | 837 | ||
861 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, | 838 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, |
862 | enum iommu_attr attr_type, void *data) | 839 | enum iommu_attr attr_type, void *data) |
863 | { | 840 | { |
864 | struct fsl_dma_domain *dma_domain = domain->priv; | 841 | struct fsl_dma_domain *dma_domain = domain->priv; |
865 | int ret = 0; | 842 | int ret = 0; |
866 | 843 | ||
867 | |||
868 | switch (attr_type) { | 844 | switch (attr_type) { |
869 | case DOMAIN_ATTR_FSL_PAMU_STASH: | 845 | case DOMAIN_ATTR_FSL_PAMU_STASH: |
870 | memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash, | 846 | memcpy(data, &dma_domain->dma_stash, |
871 | sizeof(struct pamu_stash_attribute)); | 847 | sizeof(struct pamu_stash_attribute)); |
872 | break; | 848 | break; |
873 | case DOMAIN_ATTR_FSL_PAMU_ENABLE: | 849 | case DOMAIN_ATTR_FSL_PAMU_ENABLE: |
874 | *(int *)data = dma_domain->enabled; | 850 | *(int *)data = dma_domain->enabled; |
@@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
880 | pr_debug("Unsupported attribute type\n"); | 856 | pr_debug("Unsupported attribute type\n"); |
881 | ret = -EINVAL; | 857 | ret = -EINVAL; |
882 | break; | 858 | break; |
883 | }; | 859 | } |
884 | 860 | ||
885 | return ret; | 861 | return ret; |
886 | } | 862 | } |
@@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
903 | /* Check the PCI controller version number by readding BRR1 register */ | 879 | /* Check the PCI controller version number by readding BRR1 register */ |
904 | version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); | 880 | version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); |
905 | version &= PCI_FSL_BRR1_VER; | 881 | version &= PCI_FSL_BRR1_VER; |
906 | /* If PCI controller version is >= 0x204 we can partition endpoints*/ | 882 | /* If PCI controller version is >= 0x204 we can partition endpoints */ |
907 | if (version >= 0x204) | 883 | return version >= 0x204; |
908 | return 1; | ||
909 | |||
910 | return 0; | ||
911 | } | 884 | } |
912 | 885 | ||
913 | /* Get iommu group information from peer devices or devices on the parent bus */ | 886 | /* Get iommu group information from peer devices or devices on the parent bus */ |
@@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
968 | if (pci_ctl->parent->iommu_group) { | 941 | if (pci_ctl->parent->iommu_group) { |
969 | group = get_device_iommu_group(pci_ctl->parent); | 942 | group = get_device_iommu_group(pci_ctl->parent); |
970 | iommu_group_remove_device(pci_ctl->parent); | 943 | iommu_group_remove_device(pci_ctl->parent); |
971 | } else | 944 | } else { |
972 | group = get_shared_pci_device_group(pdev); | 945 | group = get_shared_pci_device_group(pdev); |
946 | } | ||
973 | } | 947 | } |
974 | 948 | ||
975 | if (!group) | 949 | if (!group) |
@@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
1055 | } | 1029 | } |
1056 | 1030 | ||
1057 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, | 1031 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, |
1058 | ((w_count > 1) ? w_count : 0)); | 1032 | w_count > 1 ? w_count : 0); |
1059 | if (!ret) { | 1033 | if (!ret) { |
1060 | kfree(dma_domain->win_arr); | 1034 | kfree(dma_domain->win_arr); |
1061 | dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * | 1035 | dma_domain->win_arr = kcalloc(w_count, |
1062 | w_count, GFP_ATOMIC); | 1036 | sizeof(*dma_domain->win_arr), |
1037 | GFP_ATOMIC); | ||
1063 | if (!dma_domain->win_arr) { | 1038 | if (!dma_domain->win_arr) { |
1064 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | 1039 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
1065 | return -ENOMEM; | 1040 | return -ENOMEM; |
@@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = {
1095 | .remove_device = fsl_pamu_remove_device, | 1070 | .remove_device = fsl_pamu_remove_device, |
1096 | }; | 1071 | }; |
1097 | 1072 | ||
1098 | int pamu_domain_init(void) | 1073 | int __init pamu_domain_init(void) |
1099 | { | 1074 | { |
1100 | int ret = 0; | 1075 | int ret = 0; |
1101 | 1076 | ||