Diffstat (limited to 'arch/powerpc/kernel/iommu.c')

 -rw-r--r--  arch/powerpc/kernel/iommu.c | 323
 1 file changed, 323 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c0d0dbddfba1..b20ff173a671 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -36,6 +36,8 @@
 #include <linux/hash.h>
 #include <linux/fault-inject.h>
 #include <linux/pci.h>
+#include <linux/iommu.h>
+#include <linux/sched.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -44,6 +46,7 @@
 #include <asm/kdump.h>
 #include <asm/fadump.h>
 #include <asm/vio.h>
+#include <asm/tce.h>
 
 #define DBG(...)
 
@@ -724,6 +727,13 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 	if (tbl->it_offset == 0)
 		clear_bit(0, tbl->it_map);
 
+#ifdef CONFIG_IOMMU_API
+	if (tbl->it_group) {
+		iommu_group_put(tbl->it_group);
+		BUG_ON(tbl->it_group);
+	}
+#endif
+
 	/* verify that table contains no entries */
 	if (!bitmap_empty(tbl->it_map, tbl->it_size))
 		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
@@ -860,3 +870,316 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		free_pages((unsigned long)vaddr, get_order(size));
 	}
 }
+
+#ifdef CONFIG_IOMMU_API
+/*
+ * SPAPR TCE API
+ */
+static void group_release(void *iommu_data)
+{
+	struct iommu_table *tbl = iommu_data;
+	tbl->it_group = NULL;
+}
+
+void iommu_register_group(struct iommu_table *tbl,
+		int pci_domain_number, unsigned long pe_num)
+{
+	struct iommu_group *grp;
+	char *name;
+
+	grp = iommu_group_alloc();
+	if (IS_ERR(grp)) {
+		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
+				PTR_ERR(grp));
+		return;
+	}
+	tbl->it_group = grp;
+	iommu_group_set_iommudata(grp, tbl, group_release);
+	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
+			pci_domain_number, pe_num);
+	if (!name)
+		return;
+	iommu_group_set_name(grp, name);
+	kfree(name);
+}
+
+enum dma_data_direction iommu_tce_direction(unsigned long tce)
+{
+	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
+		return DMA_BIDIRECTIONAL;
+	else if (tce & TCE_PCI_READ)
+		return DMA_TO_DEVICE;
+	else if (tce & TCE_PCI_WRITE)
+		return DMA_FROM_DEVICE;
+	else
+		return DMA_NONE;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_direction);
+
+void iommu_flush_tce(struct iommu_table *tbl)
+{
+	/* Flush/invalidate TLB caches if necessary */
+	if (ppc_md.tce_flush)
+		ppc_md.tce_flush(tbl);
+
+	/* Make sure updates are seen by hardware */
+	mb();
+}
+EXPORT_SYMBOL_GPL(iommu_flush_tce);
+
+int iommu_tce_clear_param_check(struct iommu_table *tbl,
+		unsigned long ioba, unsigned long tce_value,
+		unsigned long npages)
+{
+	/* ppc_md.tce_free() does not support any value but 0 */
+	if (tce_value)
+		return -EINVAL;
+
+	if (ioba & ~IOMMU_PAGE_MASK)
+		return -EINVAL;
+
+	ioba >>= IOMMU_PAGE_SHIFT;
+	if (ioba < tbl->it_offset)
+		return -EINVAL;
+
+	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
+
+int iommu_tce_put_param_check(struct iommu_table *tbl,
+		unsigned long ioba, unsigned long tce)
+{
+	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
+		return -EINVAL;
+
+	if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+		return -EINVAL;
+
+	if (ioba & ~IOMMU_PAGE_MASK)
+		return -EINVAL;
+
+	ioba >>= IOMMU_PAGE_SHIFT;
+	if (ioba < tbl->it_offset)
+		return -EINVAL;
+
+	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
+
+unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
+{
+	unsigned long oldtce;
+	struct iommu_pool *pool = get_pool(tbl, entry);
+
+	spin_lock(&(pool->lock));
+
+	oldtce = ppc_md.tce_get(tbl, entry);
+	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
+		ppc_md.tce_free(tbl, entry, 1);
+	else
+		oldtce = 0;
+
+	spin_unlock(&(pool->lock));
+
+	return oldtce;
+}
+EXPORT_SYMBOL_GPL(iommu_clear_tce);
+
+int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
+		unsigned long entry, unsigned long pages)
+{
+	unsigned long oldtce;
+	struct page *page;
+
+	for ( ; pages; --pages, ++entry) {
+		oldtce = iommu_clear_tce(tbl, entry);
+		if (!oldtce)
+			continue;
+
+		page = pfn_to_page(oldtce >> PAGE_SHIFT);
+		WARN_ON(!page);
+		if (page) {
+			if (oldtce & TCE_PCI_WRITE)
+				SetPageDirty(page);
+			put_page(page);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);
+
+/*
+ * hwaddr is a kernel virtual address here (0xc... bazillion),
+ * tce_build converts it to a physical address.
+ */
+int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
+		unsigned long hwaddr, enum dma_data_direction direction)
+{
+	int ret = -EBUSY;
+	unsigned long oldtce;
+	struct iommu_pool *pool = get_pool(tbl, entry);
+
+	spin_lock(&(pool->lock));
+
+	oldtce = ppc_md.tce_get(tbl, entry);
+	/* Add new entry if it is not busy */
+	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
+		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);
+
+	spin_unlock(&(pool->lock));
+
+	/* if (unlikely(ret))
+		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
+				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+				hwaddr, ret); */
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_build);
+
+int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
+		unsigned long tce)
+{
+	int ret;
+	struct page *page = NULL;
+	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+	enum dma_data_direction direction = iommu_tce_direction(tce);
+
+	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
+			direction != DMA_TO_DEVICE, &page);
+	if (unlikely(ret != 1)) {
+		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
+				tce, entry << IOMMU_PAGE_SHIFT, ret); */
+		return -EFAULT;
+	}
+	hwaddr = (unsigned long) page_address(page) + offset;
+
+	ret = iommu_tce_build(tbl, entry, hwaddr, direction);
+	if (ret)
+		put_page(page);
+
+	if (ret < 0)
+		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
+				__func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);
+
+int iommu_take_ownership(struct iommu_table *tbl)
+{
+	unsigned long sz = (tbl->it_size + 7) >> 3;
+
+	if (tbl->it_offset == 0)
+		clear_bit(0, tbl->it_map);
+
+	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
+		pr_err("iommu_tce: it_map is not empty");
+		return -EBUSY;
+	}
+
+	memset(tbl->it_map, 0xff, sz);
+	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_take_ownership);
+
+void iommu_release_ownership(struct iommu_table *tbl)
+{
+	unsigned long sz = (tbl->it_size + 7) >> 3;
+
+	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
+	memset(tbl->it_map, 0, sz);
+
+	/* Restore bit#0 set by iommu_init_table() */
+	if (tbl->it_offset == 0)
+		set_bit(0, tbl->it_map);
+}
+EXPORT_SYMBOL_GPL(iommu_release_ownership);
+
+static int iommu_add_device(struct device *dev)
+{
+	struct iommu_table *tbl;
+	int ret = 0;
+
+	if (WARN_ON(dev->iommu_group)) {
+		pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n",
+				dev_name(dev),
+				iommu_group_id(dev->iommu_group));
+		return -EBUSY;
+	}
+
+	tbl = get_iommu_table_base(dev);
+	if (!tbl || !tbl->it_group) {
+		pr_debug("iommu_tce: skipping device %s with no tbl\n",
+				dev_name(dev));
+		return 0;
+	}
+
+	pr_debug("iommu_tce: adding %s to iommu group %d\n",
+			dev_name(dev), iommu_group_id(tbl->it_group));
+
+	ret = iommu_group_add_device(tbl->it_group, dev);
+	if (ret < 0)
+		pr_err("iommu_tce: %s has not been added, ret=%d\n",
+				dev_name(dev), ret);
+
+	return ret;
+}
+
+static void iommu_del_device(struct device *dev)
+{
+	iommu_group_remove_device(dev);
+}
+
+static int iommu_bus_notifier(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct device *dev = data;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		return iommu_add_device(dev);
+	case BUS_NOTIFY_DEL_DEVICE:
+		iommu_del_device(dev);
+		return 0;
+	default:
+		return 0;
+	}
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+	.notifier_call = iommu_bus_notifier,
+};
+
+static int __init tce_iommu_init(void)
+{
+	struct pci_dev *pdev = NULL;
+
+	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
+
+	for_each_pci_dev(pdev)
+		iommu_add_device(&pdev->dev);
+
+	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+	return 0;
+}
+
+subsys_initcall_sync(tce_iommu_init);
+
+#else
+
+void iommu_register_group(struct iommu_table *tbl,
+		int pci_domain_number, unsigned long pe_num)
+{
+}
+
+#endif /* CONFIG_IOMMU_API */
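
The sketch below is not part of the patch; it is a minimal, hypothetical illustration of the call order the new exports appear to expect from an external user of an iommu_table (for example a VFIO-style backend). The function demo_map_and_unmap() and its parameters (tbl, ioba, tce) are made up for illustration; only the iommu_* helpers it calls come from the patch above, and real callers would add proper locking and error reporting.

/* Hypothetical caller: map one user page at I/O bus address "ioba",
 * then tear the mapping down again. "tce" carries the user address
 * plus TCE_PCI_READ/TCE_PCI_WRITE permission bits.
 */
static int demo_map_and_unmap(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
{
	unsigned long entry = ioba >> IOMMU_PAGE_SHIFT;
	int ret;

	/* Take the table away from the kernel DMA API users first */
	ret = iommu_take_ownership(tbl);
	if (ret)
		return ret;

	/* Validate the request, pin the user page, program the TCE */
	ret = iommu_tce_put_param_check(tbl, ioba, tce);
	if (!ret)
		ret = iommu_put_tce_user_mode(tbl, entry, tce);
	iommu_flush_tce(tbl);
	if (ret)
		goto out_release;

	/* ... device DMA would happen here ... */

	/* Clear the entry, unpin/dirty the page, flush again */
	ret = iommu_tce_clear_param_check(tbl, ioba, 0, 1);
	if (!ret)
		ret = iommu_clear_tces_and_put_pages(tbl, entry, 1);
	iommu_flush_tce(tbl);

out_release:
	/* Hand the table back to the kernel DMA API */
	iommu_release_ownership(tbl);
	return ret;
}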
