path: root/drivers/iommu/iommu.c
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--  drivers/iommu/iommu.c  177
1 file changed, 166 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5b5fa5cdaa3..2198b2dbbcd 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -25,8 +27,59 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 
+static ssize_t show_iommu_group(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+        unsigned int groupid;
+
+        if (iommu_device_group(dev, &groupid))
+                return 0;
+
+        return sprintf(buf, "%u", groupid);
+}
+static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+static int add_iommu_group(struct device *dev, void *data)
+{
+        unsigned int groupid;
+
+        if (iommu_device_group(dev, &groupid) == 0)
+                return device_create_file(dev, &dev_attr_iommu_group);
+
+        return 0;
+}
+
+static int remove_iommu_group(struct device *dev)
+{
+        unsigned int groupid;
+
+        if (iommu_device_group(dev, &groupid) == 0)
+                device_remove_file(dev, &dev_attr_iommu_group);
+
+        return 0;
+}
+
+static int iommu_device_notifier(struct notifier_block *nb,
+                                 unsigned long action, void *data)
+{
+        struct device *dev = data;
+
+        if (action == BUS_NOTIFY_ADD_DEVICE)
+                return add_iommu_group(dev, NULL);
+        else if (action == BUS_NOTIFY_DEL_DEVICE)
+                return remove_iommu_group(dev);
+
+        return 0;
+}
+
+static struct notifier_block iommu_device_nb = {
+        .notifier_call = iommu_device_notifier,
+};
+
 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
+        bus_register_notifier(bus, &iommu_device_nb);
+        bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
 }
 
 /**
@@ -157,32 +210,134 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-              phys_addr_t paddr, int gfp_order, int prot)
+              phys_addr_t paddr, size_t size, int prot)
 {
-        size_t size;
+        unsigned long orig_iova = iova;
+        unsigned int min_pagesz;
+        size_t orig_size = size;
+        int ret = 0;
 
         if (unlikely(domain->ops->map == NULL))
                 return -ENODEV;
 
-        size = PAGE_SIZE << gfp_order;
+        /* find out the minimum page size supported */
+        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+        /*
+         * both the virtual address and the physical one, as well as
+         * the size of the mapping, must be aligned (at least) to the
+         * size of the smallest page supported by the hardware
+         */
+        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+                        "0x%x\n", iova, (unsigned long)paddr,
+                        (unsigned long)size, min_pagesz);
+                return -EINVAL;
+        }
+
+        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+                                (unsigned long)paddr, (unsigned long)size);
+
+        while (size) {
+                unsigned long pgsize, addr_merge = iova | paddr;
+                unsigned int pgsize_idx;
+
+                /* Max page size that still fits into 'size' */
+                pgsize_idx = __fls(size);
+
+                /* need to consider alignment requirements ? */
+                if (likely(addr_merge)) {
+                        /* Max page size allowed by both iova and paddr */
+                        unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+                }
+
+                /* build a mask of acceptable page sizes */
+                pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
-        BUG_ON(!IS_ALIGNED(iova | paddr, size));
+                /* throw away page sizes not supported by the hardware */
+                pgsize &= domain->ops->pgsize_bitmap;
 
-        return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+                /* make sure we're still sane */
+                BUG_ON(!pgsize);
+
+                /* pick the biggest page */
+                pgsize_idx = __fls(pgsize);
+                pgsize = 1UL << pgsize_idx;
+
+                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+                                        (unsigned long)paddr, pgsize);
+
+                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+                if (ret)
+                        break;
+
+                iova += pgsize;
+                paddr += pgsize;
+                size -= pgsize;
+        }
+
+        /* unroll mapping in case something went wrong */
+        if (ret)
+                iommu_unmap(domain, orig_iova, orig_size - size);
+
+        return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-        size_t size;
+        size_t unmapped_page, unmapped = 0;
+        unsigned int min_pagesz;
 
         if (unlikely(domain->ops->unmap == NULL))
                 return -ENODEV;
 
-        size = PAGE_SIZE << gfp_order;
+        /* find out the minimum page size supported */
+        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+        /*
+         * The virtual address, as well as the size of the mapping, must be
+         * aligned (at least) to the size of the smallest page supported
+         * by the hardware
+         */
+        if (!IS_ALIGNED(iova | size, min_pagesz)) {
+                pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+                        iova, (unsigned long)size, min_pagesz);
+                return -EINVAL;
+        }
+
+        pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+                                                        (unsigned long)size);
+
+        /*
+         * Keep iterating until we either unmap 'size' bytes (or more)
+         * or we hit an area that isn't mapped.
+         */
+        while (unmapped < size) {
+                size_t left = size - unmapped;
+
+                unmapped_page = domain->ops->unmap(domain, iova, left);
+                if (!unmapped_page)
+                        break;
+
+                pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+                                        (unsigned long)unmapped_page);
+
+                iova += unmapped_page;
+                unmapped += unmapped_page;
+        }
+
+        return unmapped;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
 
-        BUG_ON(!IS_ALIGNED(iova, size));
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+        if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+                return dev->bus->iommu_ops->device_group(dev, groupid);
 
-        return domain->ops->unmap(domain, iova, gfp_order);
+        return -ENODEV;
 }
-EXPORT_SYMBOL_GPL(iommu_unmap);
+EXPORT_SYMBOL_GPL(iommu_device_group);
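
Note: the last hunk changes the iommu_map()/iommu_unmap() prototypes from a get_order()-style page order to a size in bytes, and iommu_unmap() now returns how many bytes were actually unmapped. The snippet below is a minimal sketch of how a caller might use the new calls; example_map_buffer() and its error handling are illustrative only and not part of this patch.

#include <linux/iommu.h>
#include <linux/printk.h>

/*
 * Hypothetical caller of the new size-based API.  The old code would
 * have passed get_order(len); the new code passes len itself, which
 * must be aligned to the smallest page size in ops->pgsize_bitmap.
 */
static int example_map_buffer(struct iommu_domain *domain,
                              unsigned long iova, phys_addr_t paddr,
                              size_t len)
{
        int ret;

        ret = iommu_map(domain, iova, paddr, len, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        /* ... DMA through the mapping ... */

        /* iommu_unmap() now reports the number of bytes it tore down */
        if (iommu_unmap(domain, iova, len) != len)
                pr_warn("partial unmap at iova 0x%lx\n", iova);

        return 0;
}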
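
Note: the page-size selection inside the new iommu_map() loop packs a lot into a few bit operations. The stand-alone program below mirrors that logic in user space so it can be stepped through; pick_pgsize(), the example pgsize_bitmap and the test values are made up for illustration, and GCC builtins stand in for the kernel's __fls()/__ffs().

#include <stdio.h>

/*
 * Mirror of the selection step in the new iommu_map() loop: choose the
 * largest page size that fits in the bytes left, respects the alignment
 * of iova | paddr, and is advertised in the driver's pgsize_bitmap.
 * Assumes the resulting mask is non-zero (the kernel BUG()s otherwise).
 */
static unsigned long pick_pgsize(unsigned long addr_merge, unsigned long size,
                                 unsigned long pgsize_bitmap)
{
        /* highest set bit of 'size' (__fls equivalent) */
        unsigned int pgsize_idx = 8 * sizeof(long) - 1 - __builtin_clzl(size);
        unsigned long pgsize;

        if (addr_merge) {
                /* lowest set bit of the combined addresses (__ffs equivalent) */
                unsigned int align_idx = __builtin_ctzl(addr_merge);

                if (align_idx < pgsize_idx)
                        pgsize_idx = align_idx;
        }

        /* all candidate sizes up to and including 1 << pgsize_idx ... */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;
        /* ... limited to what the hardware supports */
        pgsize &= pgsize_bitmap;

        /* keep only the biggest remaining candidate */
        return 1UL << (8 * sizeof(long) - 1 - __builtin_clzl(pgsize));
}

int main(void)
{
        /* made-up hardware: 4K, 64K, 1M and 16M pages */
        unsigned long bitmap = (1UL << 12) | (1UL << 16) | (1UL << 20) | (1UL << 24);

        /* 2M region starting on a 16M boundary: a 1M page is picked first */
        printf("0x%lx\n", pick_pgsize(0x1000000UL, 0x200000UL, bitmap));
        return 0;
}

Repeated over the whole region this selects, for the 2M example above, two 1M pages, which is exactly what the while (size) loop in the patch ends up doing.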
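
Note: the sysfs and notifier code added in the second hunk only does something when the bus's iommu_ops provides the device_group() callback queried by iommu_device_group(). A minimal sketch of such a callback follows; example_device_group() and its one-group-per-PCI-bus policy are invented for illustration and do not reflect how any in-tree driver derives groups.

#include <linux/pci.h>
#include <linux/iommu.h>

/*
 * Illustrative device_group() callback: report every device on the same
 * PCI bus as one group.  Real drivers derive the ID from the isolation
 * the topology actually provides (requester-ID aliasing, ACS, etc.).
 */
static int example_device_group(struct device *dev, unsigned int *groupid)
{
        struct pci_dev *pdev;

        if (dev->bus != &pci_bus_type)
                return -ENODEV;

        pdev = to_pci_dev(dev);
        *groupid = (pci_domain_nr(pdev->bus) << 8) | pdev->bus->number;

        return 0;
}

With such a callback wired into iommu_ops, the notifier registered in iommu_bus_init() creates a read-only iommu_group attribute for each device, so the group ID can be read from sysfs (for example under /sys/bus/pci/devices/<device>/iommu_group; path shown for illustration).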