aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/video/v4l2-event.c
diff options
context:
space:
mode:
authorHans Verkuil <hans.verkuil@cisco.com>2011-06-13 18:24:17 -0400
committerMauro Carvalho Chehab <mchehab@redhat.com>2011-07-27 16:53:31 -0400
commitf1e393de382af9b9bd2462a42bfa16b8c501d81b (patch)
tree1135a4a27166d3947ebfcf2be171efa2d8cd3228 /drivers/media/video/v4l2-event.c
parent77068d36d8b9e9902a89b4bb01011d41926f5420 (diff)
[media] v4l2-event/ctrls/fh: allocate events per fh and per type instead of just per-fh
The driver had to decide how many events to allocate when the v4l2_fh struct was created. It was possible to add more events afterwards, but there was no way to ensure that you wouldn't miss important events if the event queue would fill up for that filehandle. In addition, once there were no more free events, any new events were simply dropped on the floor. For the control event in particular this made life very difficult since control status/value changes could just be missed if the number of allocated events and the speed at which the application read events was too low to keep up with the number of generated events. The application would have no idea what the latest state was for a control since it could have missed the latest control change. So this patch makes some major changes in how events are allocated. Instead of allocating events per-filehandle they are now allocated when subscribing an event. So for that particular event type N events (determined by the driver) are allocated. Those events are reserved for that particular event type. This ensures that you will not miss events for a particular type altogether. In addition, if there are N events in use and a new event is raised, then the oldest event is dropped and the new one is added. So the latest event is always available. This can be further improved by adding the ability to merge the state of two events together, ensuring that no data is lost at all. This will be added in the next patch. This also makes it possible to allow the user to determine the number of events that will be allocated. This is not implemented at the moment, but would be trivial. Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media/video/v4l2-event.c')
-rw-r--r--drivers/media/video/v4l2-event.c88
1 file changed, 33 insertions, 55 deletions
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index dc68f6085697..9e325dd3ce27 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -30,44 +30,11 @@
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32 32
33static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh); 33static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
34
35int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
36{
37 unsigned long flags;
38
39 while (fh->nallocated < n) {
40 struct v4l2_kevent *kev;
41
42 kev = kzalloc(sizeof(*kev), GFP_KERNEL);
43 if (kev == NULL)
44 return -ENOMEM;
45
46 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
47 list_add_tail(&kev->list, &fh->free);
48 fh->nallocated++;
49 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
50 }
51
52 return 0;
53}
54EXPORT_SYMBOL_GPL(v4l2_event_alloc);
55
56#define list_kfree(list, type, member) \
57 while (!list_empty(list)) { \
58 type *hi; \
59 hi = list_first_entry(list, type, member); \
60 list_del(&hi->member); \
61 kfree(hi); \
62 }
63
64void v4l2_event_free(struct v4l2_fh *fh)
65{ 34{
66 list_kfree(&fh->free, struct v4l2_kevent, list); 35 idx += sev->first;
67 list_kfree(&fh->available, struct v4l2_kevent, list); 36 return idx >= sev->elems ? idx - sev->elems : idx;
68 v4l2_event_unsubscribe_all(fh);
69} 37}
70EXPORT_SYMBOL_GPL(v4l2_event_free);
71 38
72static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) 39static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
73{ 40{
@@ -84,11 +51,13 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
84 WARN_ON(fh->navailable == 0); 51 WARN_ON(fh->navailable == 0);
85 52
86 kev = list_first_entry(&fh->available, struct v4l2_kevent, list); 53 kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
87 list_move(&kev->list, &fh->free); 54 list_del(&kev->list);
88 fh->navailable--; 55 fh->navailable--;
89 56
90 kev->event.pending = fh->navailable; 57 kev->event.pending = fh->navailable;
91 *event = kev->event; 58 *event = kev->event;
59 kev->sev->first = sev_pos(kev->sev, 1);
60 kev->sev->in_use--;
92 61
93 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 62 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
94 63
@@ -154,17 +123,24 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
154 fh->sequence++; 123 fh->sequence++;
155 124
156 /* Do we have any free events? */ 125 /* Do we have any free events? */
157 if (list_empty(&fh->free)) 126 if (sev->in_use == sev->elems) {
158 return; 127 /* no, remove the oldest one */
128 kev = sev->events + sev_pos(sev, 0);
129 list_del(&kev->list);
130 sev->in_use--;
131 sev->first = sev_pos(sev, 1);
132 fh->navailable--;
133 }
159 134
160 /* Take one and fill it. */ 135 /* Take one and fill it. */
161 kev = list_first_entry(&fh->free, struct v4l2_kevent, list); 136 kev = sev->events + sev_pos(sev, sev->in_use);
162 kev->event.type = ev->type; 137 kev->event.type = ev->type;
163 kev->event.u = ev->u; 138 kev->event.u = ev->u;
164 kev->event.id = ev->id; 139 kev->event.id = ev->id;
165 kev->event.timestamp = *ts; 140 kev->event.timestamp = *ts;
166 kev->event.sequence = fh->sequence; 141 kev->event.sequence = fh->sequence;
167 list_move_tail(&kev->list, &fh->available); 142 sev->in_use++;
143 list_add_tail(&kev->list, &fh->available);
168 144
169 fh->navailable++; 145 fh->navailable++;
170 146
@@ -209,38 +185,39 @@ int v4l2_event_pending(struct v4l2_fh *fh)
209EXPORT_SYMBOL_GPL(v4l2_event_pending); 185EXPORT_SYMBOL_GPL(v4l2_event_pending);
210 186
211int v4l2_event_subscribe(struct v4l2_fh *fh, 187int v4l2_event_subscribe(struct v4l2_fh *fh,
212 struct v4l2_event_subscription *sub) 188 struct v4l2_event_subscription *sub, unsigned elems)
213{ 189{
214 struct v4l2_subscribed_event *sev, *found_ev; 190 struct v4l2_subscribed_event *sev, *found_ev;
215 struct v4l2_ctrl *ctrl = NULL; 191 struct v4l2_ctrl *ctrl = NULL;
216 unsigned long flags; 192 unsigned long flags;
193 unsigned i;
217 194
195 if (elems < 1)
196 elems = 1;
218 if (sub->type == V4L2_EVENT_CTRL) { 197 if (sub->type == V4L2_EVENT_CTRL) {
219 ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id); 198 ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
220 if (ctrl == NULL) 199 if (ctrl == NULL)
221 return -EINVAL; 200 return -EINVAL;
222 } 201 }
223 202
224 sev = kzalloc(sizeof(*sev), GFP_KERNEL); 203 sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
225 if (!sev) 204 if (!sev)
226 return -ENOMEM; 205 return -ENOMEM;
206 for (i = 0; i < elems; i++)
207 sev->events[i].sev = sev;
208 sev->type = sub->type;
209 sev->id = sub->id;
210 sev->flags = sub->flags;
211 sev->fh = fh;
212 sev->elems = elems;
227 213
228 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 214 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
229
230 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); 215 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
231 if (!found_ev) { 216 if (!found_ev)
232 INIT_LIST_HEAD(&sev->list);
233 sev->type = sub->type;
234 sev->id = sub->id;
235 sev->fh = fh;
236 sev->flags = sub->flags;
237
238 list_add(&sev->list, &fh->subscribed); 217 list_add(&sev->list, &fh->subscribed);
239 }
240
241 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 218 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
242 219
243 /* v4l2_ctrl_add_fh uses a mutex, so do this outside the spin lock */ 220 /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
244 if (found_ev) 221 if (found_ev)
245 kfree(sev); 222 kfree(sev);
246 else if (ctrl) 223 else if (ctrl)
@@ -250,7 +227,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
250} 227}
251EXPORT_SYMBOL_GPL(v4l2_event_subscribe); 228EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
252 229
253static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) 230void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
254{ 231{
255 struct v4l2_event_subscription sub; 232 struct v4l2_event_subscription sub;
256 struct v4l2_subscribed_event *sev; 233 struct v4l2_subscribed_event *sev;
@@ -271,6 +248,7 @@ static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
271 v4l2_event_unsubscribe(fh, &sub); 248 v4l2_event_unsubscribe(fh, &sub);
272 } while (sev); 249 } while (sev);
273} 250}
251EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
274 252
275int v4l2_event_unsubscribe(struct v4l2_fh *fh, 253int v4l2_event_unsubscribe(struct v4l2_fh *fh,
276 struct v4l2_event_subscription *sub) 254 struct v4l2_event_subscription *sub)