author:    Antonios Motakis <a.motakis@virtualopensystems.com>  2015-03-16 16:08:50 -0400
committer: Alex Williamson <alex.williamson@redhat.com>         2015-03-16 16:08:50 -0400
commit:    06211b40ce6b63903fe03831fd075a25630dc856 (patch)
tree:      e2b486e429ab95e7813d052ad321933057282880 /drivers/vfio
parent:    57f972e2b341dd6a73533f9293ec55d584a5d833 (diff)
vfio/platform: support for level sensitive interrupts
Level-sensitive interrupts are exposed as maskable and automasked
interrupts; they are masked (disabled) automatically when they fire and
remain masked until userspace unmasks them.
Signed-off-by: Antonios Motakis <a.motakis@virtualopensystems.com>
[Baptiste Reynal: Move masked interrupt initialization from "vfio/platform:
trigger an interrupt via eventfd"]
Signed-off-by: Baptiste Reynal <b.reynal@virtualopensystems.com>
Reviewed-by: Eric Auger <eric.auger@linaro.org>
Tested-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
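
To illustrate the contract this patch establishes, here is a minimal userspace sketch of how a vfio-platform driver might consume such a level-sensitive IRQ through the standard VFIO_DEVICE_SET_IRQS uAPI: wire an eventfd to the IRQ, wait for it to fire (the kernel automasks the line in vfio_automasked_irq_handler), service the device, then unmask. This is not part of the patch; the device fd `device_fd`, the fixed IRQ index 0, and the helper name are assumptions for the example.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: service a level-sensitive (automasked) IRQ at index 0 of an
 * already-open vfio-platform device fd. Error handling is minimal. */
static int service_level_irq(int device_fd)
{
	struct vfio_irq_set unmask = {
		.argsz = sizeof(unmask),
		.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
		.index = 0,
		.start = 0,
		.count = 1,
	};
	size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
	struct vfio_irq_set *trigger = malloc(argsz);
	int32_t efd = eventfd(0, 0);
	uint64_t count;

	if (!trigger || efd < 0)
		return -1;

	/* Register the eventfd as the trigger for IRQ index 0. */
	trigger->argsz = argsz;
	trigger->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	trigger->index = 0;
	trigger->start = 0;
	trigger->count = 1;
	memcpy(trigger->data, &efd, sizeof(efd));
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, trigger)) {
		free(trigger);
		return -1;
	}
	free(trigger);

	for (;;) {
		/* Blocks until the interrupt fires; the kernel has already
		 * masked (disabled) the line at this point. */
		if (read(efd, &count, sizeof(count)) != sizeof(count))
			break;

		/* ... service the device so it deasserts the line ... */

		/* Re-enable the line for the next interrupt. */
		if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &unmask))
			break;
	}

	return 0;
}
```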
Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/platform/vfio_platform_irq.c     | 103
-rw-r--r--  drivers/vfio/platform/vfio_platform_private.h |   2
2 files changed, 102 insertions, 3 deletions
diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
index 611ec80db63a..e0e638841d0b 100644
--- a/drivers/vfio/platform/vfio_platform_irq.c
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -23,12 +23,59 @@
 
 #include "vfio_platform_private.h"
 
+static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_ctx->lock, flags);
+
+	if (!irq_ctx->masked) {
+		disable_irq_nosync(irq_ctx->hwirq);
+		irq_ctx->masked = true;
+	}
+
+	spin_unlock_irqrestore(&irq_ctx->lock, flags);
+}
+
 static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
 				       unsigned index, unsigned start,
 				       unsigned count, uint32_t flags,
 				       void *data)
 {
-	return -EINVAL;
+	if (start != 0 || count != 1)
+		return -EINVAL;
+
+	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
+		return -EINVAL;
+
+	if (flags & VFIO_IRQ_SET_DATA_EVENTFD)
+		return -EINVAL; /* not implemented yet */
+
+	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+		vfio_platform_mask(&vdev->irqs[index]);
+
+	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+		uint8_t mask = *(uint8_t *)data;
+
+		if (mask)
+			vfio_platform_mask(&vdev->irqs[index]);
+	}
+
+	return 0;
+}
+
+static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_ctx->lock, flags);
+
+	if (irq_ctx->masked) {
+		enable_irq(irq_ctx->hwirq);
+		irq_ctx->masked = false;
+	}
+
+	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 }
 
 static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
@@ -36,7 +83,50 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
 					unsigned count, uint32_t flags,
 					void *data)
 {
-	return -EINVAL;
+	if (start != 0 || count != 1)
+		return -EINVAL;
+
+	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
+		return -EINVAL;
+
+	if (flags & VFIO_IRQ_SET_DATA_EVENTFD)
+		return -EINVAL; /* not implemented yet */
+
+	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+		vfio_platform_unmask(&vdev->irqs[index]);
+
+	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+		uint8_t unmask = *(uint8_t *)data;
+
+		if (unmask)
+			vfio_platform_unmask(&vdev->irqs[index]);
+	}
+
+	return 0;
+}
+
+static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+{
+	struct vfio_platform_irq *irq_ctx = dev_id;
+	unsigned long flags;
+	int ret = IRQ_NONE;
+
+	spin_lock_irqsave(&irq_ctx->lock, flags);
+
+	if (!irq_ctx->masked) {
+		ret = IRQ_HANDLED;
+
+		/* automask maskable interrupts */
+		disable_irq_nosync(irq_ctx->hwirq);
+		irq_ctx->masked = true;
+	}
+
+	spin_unlock_irqrestore(&irq_ctx->lock, flags);
+
+	if (ret == IRQ_HANDLED)
+		eventfd_signal(irq_ctx->trigger, 1);
+
+	return ret;
 }
 
 static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
@@ -78,6 +168,7 @@ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
 
 	irq->trigger = trigger;
 
+	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
 	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
 	if (ret) {
 		kfree(irq->name);
@@ -86,6 +177,9 @@ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
 		return ret;
 	}
 
+	if (!irq->masked)
+		enable_irq(irq->hwirq);
+
 	return 0;
 }
 
@@ -98,7 +192,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
 	irq_handler_t handler;
 
 	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
-		return -EINVAL; /* not implemented */
+		handler = vfio_automasked_irq_handler;
 	else
 		handler = vfio_irq_handler;
 
@@ -170,6 +264,8 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
 		if (hwirq < 0)
 			goto err;
 
+		spin_lock_init(&vdev->irqs[i].lock);
+
 		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
 
 		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
@@ -178,6 +274,7 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
 
 		vdev->irqs[i].count = 1;
 		vdev->irqs[i].hwirq = hwirq;
+		vdev->irqs[i].masked = false;
 	}
 
 	vdev->num_irqs = cnt;
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index aa01cc36af53..ff2db1d20a26 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -33,6 +33,8 @@ struct vfio_platform_irq {
 	int hwirq;
 	char *name;
 	struct eventfd_ctx *trigger;
+	bool masked;
+	spinlock_t lock;
 };
 
 struct vfio_platform_region {
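
The new set_irq_mask/unmask handlers also accept the VFIO_IRQ_SET_DATA_BOOL variant, where one uint8_t per sub-index selects whether the action is applied. The sketch below is a hypothetical userspace helper exercising that branch; `device_fd` (an open vfio-platform device fd) and the helper name are assumptions, not part of the patch.

```c
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical helper: drive the VFIO_IRQ_SET_DATA_BOOL branch of the new
 * mask/unmask handlers. A non-zero bool for the (single) sub-index applies
 * the requested action. */
static int vfio_platform_irq_bool_mask(int device_fd, uint32_t index, int mask)
{
	size_t argsz = sizeof(struct vfio_irq_set) + sizeof(uint8_t);
	struct vfio_irq_set *irq_set = malloc(argsz);
	int ret;

	if (!irq_set)
		return -1;

	irq_set->argsz = argsz;
	irq_set->flags = VFIO_IRQ_SET_DATA_BOOL |
			 (mask ? VFIO_IRQ_SET_ACTION_MASK
			       : VFIO_IRQ_SET_ACTION_UNMASK);
	irq_set->index = index;
	irq_set->start = 0;
	irq_set->count = 1;
	irq_set->data[0] = 1;	/* apply the action to sub-index 0 */

	ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	free(irq_set);
	return ret;
}
```

Note that eventfd-based mask/unmask notification (VFIO_IRQ_SET_DATA_EVENTFD) is explicitly rejected by this patch with "not implemented yet".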