path: root/drivers/iommu
author	Alex Williamson <alex.williamson@redhat.com>	2012-05-30 16:18:53 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2012-06-25 07:48:15 -0400
commit	d72e31c9374627068df29da8085ca18c92ae35d3 (patch)
tree	ed8bb77e31c0dd2c2b9c84d31ef6860c6ff03e51 /drivers/iommu
parent	74416e1e07660798379ce10a210bf4fd35b84f9f (diff)
iommu: IOMMU Groups
IOMMU device groups are currently a rather vague associative notion with assembly required by the user or user level driver provider to do anything useful. This patch intends to grow the IOMMU group concept into something a bit more consumable.

To do this, we first create an object representing the group, struct iommu_group. This structure is allocated (iommu_group_alloc) and filled (iommu_group_add_device) by the iommu driver. The iommu driver is free to add devices to the group using its own set of policies. This allows inclusion of devices based on physical hardware or topology limitations of the platform, as well as soft requirements, such as multi-function trust levels or peer-to-peer protection of the interconnects. Each device may only belong to a single iommu group, which is linked from struct device.iommu_group. IOMMU groups are maintained using kobject reference counting, allowing for automatic removal of empty, unreferenced groups. It is the responsibility of the iommu driver to remove devices from the group (iommu_group_remove_device).

IOMMU groups also include a userspace representation in sysfs under /sys/kernel/iommu_groups. When allocated, each group is given a dynamically assigned ID (int). The ID is managed by the core IOMMU group code to support multiple heterogeneous iommu drivers, which could potentially collide in group naming/numbering. This also keeps group IDs to small, easily managed values. A directory is created under /sys/kernel/iommu_groups for each group. A further subdirectory named "devices" contains links to each device within the group. The iommu_group file in the device's sysfs directory, which formerly contained a group number when read, is now a link to the iommu group. Example:

$ ls -l /sys/kernel/iommu_groups/26/devices/
total 0
lrwxrwxrwx. 1 root root 0 Apr 17 12:57 0000:00:1e.0 -> ../../../../devices/pci0000:00/0000:00:1e.0
lrwxrwxrwx. 1 root root 0 Apr 17 12:57 0000:06:0d.0 -> ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.0
lrwxrwxrwx. 1 root root 0 Apr 17 12:57 0000:06:0d.1 -> ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.1

$ ls -l /sys/kernel/iommu_groups/26/devices/*/iommu_group
[truncating perms/owner/timestamp]
/sys/kernel/iommu_groups/26/devices/0000:00:1e.0/iommu_group -> ../../../kernel/iommu_groups/26
/sys/kernel/iommu_groups/26/devices/0000:06:0d.0/iommu_group -> ../../../../kernel/iommu_groups/26
/sys/kernel/iommu_groups/26/devices/0000:06:0d.1/iommu_group -> ../../../../kernel/iommu_groups/26

Groups also include several exported functions for use by user level driver providers, for example VFIO. These include:

iommu_group_get(): Acquires a reference to a group from a device
iommu_group_put(): Releases reference
iommu_group_for_each_dev(): Iterates over group devices using callback
iommu_group_[un]register_notifier(): Allows notification of device add and remove operations relevant to the group
iommu_group_id(): Return the group number

This patch also extends the IOMMU API to allow attaching groups to domains. This is currently a simple wrapper for iterating through devices within a group, but it's expected that the IOMMU API may eventually make groups a more integral part of domains.

Groups intentionally do not try to manage group ownership. A user level driver provider must independently acquire ownership for each device within a group before making use of the group as a whole. This may change in the future if group usage becomes more pervasive across both DMA and IOMMU ops.

Groups intentionally do not provide a mechanism for driver locking or otherwise manipulating driver matching/probing of devices within the group. Such interfaces are generic to devices and beyond the scope of IOMMU groups. If implemented, user level providers have ready access via iommu_group_for_each_dev and group notifiers.

iommu_device_group() is removed here as it has no users. The replacement is:

	group = iommu_group_get(dev);
	id = iommu_group_id(group);
	iommu_group_put(group);

AMD-Vi & Intel VT-d support re-added in following patches.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
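For illustration, the sketch below shows how the interfaces added by this patch fit together: an iommu driver allocates a group and registers a device in it, and a group consumer (a user level driver provider such as VFIO) looks the group up and attaches it to a domain. The helpers example_driver_add_device and example_consumer_bind are hypothetical, and error handling is abbreviated; this is an editorial sketch, not part of the commit.

/*
 * Hedged sketch (not part of the patch): driver-side group creation and
 * consumer-side lookup/attach using only the interfaces added here.
 * Surrounding driver context is hypothetical.
 */
#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Driver side: called while setting up translation for @dev. */
static int example_driver_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();		/* new group, reference held */
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev); /* links dev in sysfs */
	iommu_group_put(group);			/* drop allocation reference */
	return ret;
}

/* Consumer side: resolve the group for a device and attach a domain. */
static int example_consumer_bind(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev); /* +1 reference */
	int ret;

	if (!group)
		return -ENODEV;			/* device has no group */

	pr_info("device is in iommu group %d\n", iommu_group_id(group));
	ret = iommu_attach_group(domain, group);  /* attach every member */
	iommu_group_put(group);			/* release reference */
	return ret;
}

Dropping the allocation reference right after iommu_group_add_device() relies on the device holding its own group reference, mirroring the kobject reference counting described above: the group disappears automatically once it has no devices and no external references.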
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd_iommu.c	21
-rw-r--r--	drivers/iommu/intel-iommu.c	49
-rw-r--r--	drivers/iommu/iommu.c	578
3 files changed, 548 insertions, 100 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a2e418cba0ff..55283d6291c8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3227,26 +3227,6 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }
 
-static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
-	struct iommu_dev_data *dev_data = dev->archdata.iommu;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	u16 devid;
-
-	if (!dev_data)
-		return -ENODEV;
-
-	if (pdev->is_virtfn || !iommu_group_mf)
-		devid = dev_data->devid;
-	else
-		devid = calc_devid(pdev->bus->number,
-				   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
-
-	*groupid = amd_iommu_alias_table[devid];
-
-	return 0;
-}
-
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -3256,7 +3236,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
-	.device_group = amd_iommu_device_group,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b12af2ff8c54..c62f2df25221 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4090,54 +4090,6 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }
 
-/*
- * Group numbers are arbitrary. Device with the same group number
- * indicate the iommu cannot differentiate between them. To avoid
- * tracking used groups we just use the seg|bus|devfn of the lowest
- * level we're able to differentiate devices
- */
-static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_dev *bridge;
-	union {
-		struct {
-			u8 devfn;
-			u8 bus;
-			u16 segment;
-		} pci;
-		u32 group;
-	} id;
-
-	if (iommu_no_mapping(dev))
-		return -ENODEV;
-
-	id.pci.segment = pci_domain_nr(pdev->bus);
-	id.pci.bus = pdev->bus->number;
-	id.pci.devfn = pdev->devfn;
-
-	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
-		return -ENODEV;
-
-	bridge = pci_find_upstream_pcie_bridge(pdev);
-	if (bridge) {
-		if (pci_is_pcie(bridge)) {
-			id.pci.bus = bridge->subordinate->number;
-			id.pci.devfn = 0;
-		} else {
-			id.pci.bus = bridge->bus->number;
-			id.pci.devfn = bridge->devfn;
-		}
-	}
-
-	if (!pdev->is_virtfn && iommu_group_mf)
-		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
-
-	*groupid = id.group;
-
-	return 0;
-}
-
 static struct iommu_ops intel_iommu_ops = {
 	.domain_init = intel_iommu_domain_init,
 	.domain_destroy = intel_iommu_domain_destroy,
@@ -4147,7 +4099,6 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap = intel_iommu_unmap,
 	.iova_to_phys = intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
-	.device_group = intel_iommu_device_group,
 	.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
 };
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8b9ded88e6f5..0e928acd7dcf 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -26,60 +26,535 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
+
+static struct kset *iommu_group_kset;
+static struct ida iommu_group_ida;
+static struct mutex iommu_group_mutex;
+
+struct iommu_group {
+	struct kobject kobj;
+	struct kobject *devices_kobj;
+	struct list_head devices;
+	struct mutex mutex;
+	struct blocking_notifier_head notifier;
+	void *iommu_data;
+	void (*iommu_data_release)(void *iommu_data);
+	char *name;
+	int id;
+};
+
+struct iommu_device {
+	struct list_head list;
+	struct device *dev;
+	char *name;
+};
+
+struct iommu_group_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct iommu_group *group, char *buf);
+	ssize_t (*store)(struct iommu_group *group,
+			 const char *buf, size_t count);
+};
+
+#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
+struct iommu_group_attribute iommu_group_attr_##_name =	\
+	__ATTR(_name, _mode, _show, _store)
 
-static ssize_t show_iommu_group(struct device *dev,
-				struct device_attribute *attr, char *buf)
+#define to_iommu_group_attr(_attr)	\
+	container_of(_attr, struct iommu_group_attribute, attr)
+#define to_iommu_group(_kobj)		\
+	container_of(_kobj, struct iommu_group, kobj)
+
+static ssize_t iommu_group_attr_show(struct kobject *kobj,
+				     struct attribute *__attr, char *buf)
 {
-	unsigned int groupid;
+	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+	struct iommu_group *group = to_iommu_group(kobj);
+	ssize_t ret = -EIO;
 
-	if (iommu_device_group(dev, &groupid))
-		return 0;
+	if (attr->show)
+		ret = attr->show(group, buf);
+	return ret;
+}
+
+static ssize_t iommu_group_attr_store(struct kobject *kobj,
+				      struct attribute *__attr,
+				      const char *buf, size_t count)
+{
+	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+	struct iommu_group *group = to_iommu_group(kobj);
+	ssize_t ret = -EIO;
 
-	return sprintf(buf, "%u", groupid);
+	if (attr->store)
+		ret = attr->store(group, buf, count);
+	return ret;
 }
-static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
 
-static int add_iommu_group(struct device *dev, void *data)
+static const struct sysfs_ops iommu_group_sysfs_ops = {
+	.show = iommu_group_attr_show,
+	.store = iommu_group_attr_store,
+};
+
+static int iommu_group_create_file(struct iommu_group *group,
+				   struct iommu_group_attribute *attr)
+{
+	return sysfs_create_file(&group->kobj, &attr->attr);
+}
+
+static void iommu_group_remove_file(struct iommu_group *group,
+				    struct iommu_group_attribute *attr)
+{
+	sysfs_remove_file(&group->kobj, &attr->attr);
+}
+
+static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
+{
+	return sprintf(buf, "%s\n", group->name);
+}
+
+static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+
+static void iommu_group_release(struct kobject *kobj)
+{
+	struct iommu_group *group = to_iommu_group(kobj);
+
+	if (group->iommu_data_release)
+		group->iommu_data_release(group->iommu_data);
+
+	mutex_lock(&iommu_group_mutex);
+	ida_remove(&iommu_group_ida, group->id);
+	mutex_unlock(&iommu_group_mutex);
+
+	kfree(group->name);
+	kfree(group);
+}
+
+static struct kobj_type iommu_group_ktype = {
+	.sysfs_ops = &iommu_group_sysfs_ops,
+	.release = iommu_group_release,
+};
+
+/**
+ * iommu_group_alloc - Allocate a new group
+ * @name: Optional name to associate with group, visible in sysfs
+ *
+ * This function is called by an iommu driver to allocate a new iommu
+ * group. The iommu group represents the minimum granularity of the iommu.
+ * Upon successful return, the caller holds a reference to the supplied
+ * group in order to hold the group until devices are added. Use
+ * iommu_group_put() to release this extra reference count, allowing the
+ * group to be automatically reclaimed once it has no devices or external
+ * references.
+ */
+struct iommu_group *iommu_group_alloc(void)
 {
-	unsigned int groupid;
+	struct iommu_group *group;
+	int ret;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group)
+		return ERR_PTR(-ENOMEM);
+
+	group->kobj.kset = iommu_group_kset;
+	mutex_init(&group->mutex);
+	INIT_LIST_HEAD(&group->devices);
+	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+
+	mutex_lock(&iommu_group_mutex);
+
+again:
+	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+		kfree(group);
+		mutex_unlock(&iommu_group_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
+		goto again;
+
+	mutex_unlock(&iommu_group_mutex);
 
-	if (iommu_device_group(dev, &groupid) == 0)
-		return device_create_file(dev, &dev_attr_iommu_group);
+	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
+				   NULL, "%d", group->id);
+	if (ret) {
+		mutex_lock(&iommu_group_mutex);
+		ida_remove(&iommu_group_ida, group->id);
+		mutex_unlock(&iommu_group_mutex);
+		kfree(group);
+		return ERR_PTR(ret);
+	}
+
+	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
+	if (!group->devices_kobj) {
+		kobject_put(&group->kobj); /* triggers .release & free */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * The devices_kobj holds a reference on the group kobject, so
+	 * as long as that exists so will the group. We can therefore
+	 * use the devices_kobj for reference counting.
+	 */
+	kobject_put(&group->kobj);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_alloc);
+
+/**
+ * iommu_group_get_iommudata - retrieve iommu_data registered for a group
+ * @group: the group
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations. This function provides a way to retrieve it. Caller
+ * should hold a group reference.
+ */
+void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+	return group->iommu_data;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
+
+/**
+ * iommu_group_set_iommudata - set iommu_data for a group
+ * @group: the group
+ * @iommu_data: new data
+ * @release: release function for iommu_data
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations. This function provides a way to set the data after
+ * the group has been allocated. Caller should hold a group reference.
+ */
+void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+			       void (*release)(void *iommu_data))
+{
+	group->iommu_data = iommu_data;
+	group->iommu_data_release = release;
+}
+EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
+
+/**
+ * iommu_group_set_name - set name for a group
+ * @group: the group
+ * @name: name
+ *
+ * Allow iommu driver to set a name for a group. When set it will
+ * appear in a name attribute file under the group in sysfs.
+ */
+int iommu_group_set_name(struct iommu_group *group, const char *name)
+{
+	int ret;
+
+	if (group->name) {
+		iommu_group_remove_file(group, &iommu_group_attr_name);
+		kfree(group->name);
+		group->name = NULL;
+		if (!name)
+			return 0;
+	}
+
+	group->name = kstrdup(name, GFP_KERNEL);
+	if (!group->name)
+		return -ENOMEM;
+
+	ret = iommu_group_create_file(group, &iommu_group_attr_name);
+	if (ret) {
+		kfree(group->name);
+		group->name = NULL;
+		return ret;
+	}
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iommu_group_set_name);
 
-static int remove_iommu_group(struct device *dev)
+/**
+ * iommu_group_add_device - add a device to an iommu group
+ * @group: the group into which to add the device (reference should be held)
+ * @dev: the device
+ *
+ * This function is called by an iommu driver to add a device into a
+ * group. Adding a device increments the group reference count.
+ */
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 {
-	unsigned int groupid;
+	int ret, i = 0;
+	struct iommu_device *device;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
+
+	device->dev = dev;
 
-	if (iommu_device_group(dev, &groupid) == 0)
-		device_remove_file(dev, &dev_attr_iommu_group);
+	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
+	if (ret) {
+		kfree(device);
+		return ret;
+	}
+
+	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
+rename:
+	if (!device->name) {
+		sysfs_remove_link(&dev->kobj, "iommu_group");
+		kfree(device);
+		return -ENOMEM;
+	}
 
+	ret = sysfs_create_link_nowarn(group->devices_kobj,
+				       &dev->kobj, device->name);
+	if (ret) {
+		kfree(device->name);
+		if (ret == -EEXIST && i >= 0) {
+			/*
+			 * Account for the slim chance of collision
+			 * and append an instance to the name.
+			 */
+			device->name = kasprintf(GFP_KERNEL, "%s.%d",
+						 kobject_name(&dev->kobj), i++);
+			goto rename;
+		}
+
+		sysfs_remove_link(&dev->kobj, "iommu_group");
+		kfree(device);
+		return ret;
+	}
+
+	kobject_get(group->devices_kobj);
+
+	dev->iommu_group = group;
+
+	mutex_lock(&group->mutex);
+	list_add_tail(&device->list, &group->devices);
+	mutex_unlock(&group->mutex);
+
+	/* Notify any listeners about change to group. */
+	blocking_notifier_call_chain(&group->notifier,
+				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iommu_group_add_device);
 
-static int iommu_device_notifier(struct notifier_block *nb,
-				 unsigned long action, void *data)
+/**
+ * iommu_group_remove_device - remove a device from its current group
+ * @dev: device to be removed
+ *
+ * This function is called by an iommu driver to remove the device from
+ * its current group. This decrements the iommu group reference count.
+ */
+void iommu_group_remove_device(struct device *dev)
+{
+	struct iommu_group *group = dev->iommu_group;
+	struct iommu_device *tmp_device, *device = NULL;
+
+	/* Pre-notify listeners that a device is being removed. */
+	blocking_notifier_call_chain(&group->notifier,
+				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(tmp_device, &group->devices, list) {
+		if (tmp_device->dev == dev) {
+			device = tmp_device;
+			list_del(&device->list);
+			break;
+		}
+	}
+	mutex_unlock(&group->mutex);
+
+	if (!device)
+		return;
+
+	sysfs_remove_link(group->devices_kobj, device->name);
+	sysfs_remove_link(&dev->kobj, "iommu_group");
+
+	kfree(device->name);
+	kfree(device);
+	dev->iommu_group = NULL;
+	kobject_put(group->devices_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
+int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+			     int (*fn)(struct device *, void *))
+{
+	struct iommu_device *device;
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		ret = fn(device->dev, data);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
+
+/**
+ * iommu_group_get - Return the group for a device and increment reference
+ * @dev: get the group that this device belongs to
+ *
+ * This function is called by iommu drivers and users to get the group
+ * for the specified device. If found, the group is returned and the group
+ * reference is incremented, else NULL.
+ */
+struct iommu_group *iommu_group_get(struct device *dev)
+{
+	struct iommu_group *group = dev->iommu_group;
+
+	if (group)
+		kobject_get(group->devices_kobj);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get);
+
+/**
+ * iommu_group_put - Decrement group reference
+ * @group: the group to use
+ *
+ * This function is called by iommu drivers and users to release the
+ * iommu group. Once the reference count is zero, the group is released.
+ */
+void iommu_group_put(struct iommu_group *group)
+{
+	if (group)
+		kobject_put(group->devices_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_put);
+
+/**
+ * iommu_group_register_notifier - Register a notifier for group changes
+ * @group: the group to watch
+ * @nb: notifier block to signal
+ *
+ * This function allows iommu group users to track changes in a group.
+ * See include/linux/iommu.h for actions sent via this notifier. Caller
+ * should hold a reference to the group throughout notifier registration.
+ */
+int iommu_group_register_notifier(struct iommu_group *group,
+				  struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&group->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
+
+/**
+ * iommu_group_unregister_notifier - Unregister a notifier
+ * @group: the group to watch
+ * @nb: notifier block to signal
+ *
+ * Unregister a previously registered group notifier block.
+ */
+int iommu_group_unregister_notifier(struct iommu_group *group,
+				    struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&group->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
+
+/**
+ * iommu_group_id - Return ID for a group
+ * @group: the group to ID
+ *
+ * Return the unique ID for the group matching the sysfs group number.
+ */
+int iommu_group_id(struct iommu_group *group)
+{
+	return group->id;
+}
+EXPORT_SYMBOL_GPL(iommu_group_id);
+
+static int add_iommu_group(struct device *dev, void *data)
+{
+	struct iommu_ops *ops = data;
+
+	if (!ops->add_device)
+		return -ENODEV;
+
+	WARN_ON(dev->iommu_group);
+
+	ops->add_device(dev);
+
+	return 0;
+}
+
+static int iommu_bus_notifier(struct notifier_block *nb,
+			      unsigned long action, void *data)
 {
 	struct device *dev = data;
+	struct iommu_ops *ops = dev->bus->iommu_ops;
+	struct iommu_group *group;
+	unsigned long group_action = 0;
+
+	/*
+	 * ADD/DEL call into iommu driver ops if provided, which may
+	 * result in ADD/DEL notifiers to group->notifier
+	 */
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		if (ops->add_device)
+			return ops->add_device(dev);
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (ops->remove_device && dev->iommu_group) {
+			ops->remove_device(dev);
+			return 0;
+		}
+	}
 
-	if (action == BUS_NOTIFY_ADD_DEVICE)
-		return add_iommu_group(dev, NULL);
-	else if (action == BUS_NOTIFY_DEL_DEVICE)
-		return remove_iommu_group(dev);
+	/*
+	 * Remaining BUS_NOTIFYs get filtered and republished to the
+	 * group, if anyone is listening
+	 */
+	group = iommu_group_get(dev);
+	if (!group)
+		return 0;
 
+	switch (action) {
+	case BUS_NOTIFY_BIND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
+		break;
+	case BUS_NOTIFY_BOUND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
+		break;
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
+		break;
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
+		break;
+	}
+
+	if (group_action)
+		blocking_notifier_call_chain(&group->notifier,
+					     group_action, dev);
+
+	iommu_group_put(group);
 	return 0;
 }
 
-static struct notifier_block iommu_device_nb = {
-	.notifier_call = iommu_device_notifier,
+static struct notifier_block iommu_bus_nb = {
+	.notifier_call = iommu_bus_notifier,
 };
 
 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
-	bus_register_notifier(bus, &iommu_device_nb);
-	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
+	bus_register_notifier(bus, &iommu_bus_nb);
+	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
 }
 
 /**
@@ -192,6 +667,45 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
+/*
+ * IOMMU groups are really the natural working unit of the IOMMU, but
+ * the IOMMU API works on domains and devices. Bridge that gap by
+ * iterating over the devices in a group. Ideally we'd have a single
+ * device which represents the requestor ID of the group, but we also
+ * allow IOMMU drivers to create policy defined minimum sets, where
+ * the physical hardware may be able to distinguish members, but we
+ * wish to group them at a higher level (ex. untrusted multi-function
+ * PCI devices). Thus we attach each device.
+ */
+static int iommu_group_do_attach_device(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+
+	return iommu_attach_device(domain, dev);
+}
+
+int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+	return iommu_group_for_each_dev(group, domain,
+					iommu_group_do_attach_device);
+}
+EXPORT_SYMBOL_GPL(iommu_attach_group);
+
+static int iommu_group_do_detach_device(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+
+	iommu_detach_device(domain, dev);
+
+	return 0;
+}
+
+void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+	iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
+}
+EXPORT_SYMBOL_GPL(iommu_detach_group);
+
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 			       unsigned long iova)
 {
@@ -336,11 +850,15 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
-int iommu_device_group(struct device *dev, unsigned int *groupid)
+static int __init iommu_init(void)
 {
-	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
-		return dev->bus->iommu_ops->device_group(dev, groupid);
+	iommu_group_kset = kset_create_and_add("iommu_groups",
+					       NULL, kernel_kobj);
+	ida_init(&iommu_group_ida);
+	mutex_init(&iommu_group_mutex);
 
-	return -ENODEV;
+	BUG_ON(!iommu_group_kset);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(iommu_device_group);
+subsys_initcall(iommu_init);