aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/video/v4l2-event.c
diff options
context:
space:
mode:
authorHans Verkuil <hans.verkuil@cisco.com>2011-06-13 16:44:42 -0400
committerMauro Carvalho Chehab <mchehab@redhat.com>2011-07-27 16:53:31 -0400
commit523f46d6aba9dcb0a2d0fc474ca884e93a7cf198 (patch)
tree5c73cecf3d9786868ac03c93f7df2df6a465d9a6 /drivers/media/video/v4l2-event.c
parent95904d4b6a188ea2f0f1781498f6ca626e21b9ac (diff)
[media] v4l2-events/fh: merge v4l2_events into v4l2_fh
Drivers that supported events used to be rare, but now that controls can also raise events this will become much more common since almost all drivers have controls. This means that keeping struct v4l2_events as a separate struct make no more sense. Merging it into struct v4l2_fh simplifies things substantially as it is now an integral part of the filehandle struct. Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media/video/v4l2-event.c')
-rw-r--r--drivers/media/video/v4l2-event.c93
1 file changed, 25 insertions, 68 deletions
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 670f2f834e6a..70fa82daaca7 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -32,35 +32,11 @@
32 32
33static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh); 33static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh);
34 34
35int v4l2_event_init(struct v4l2_fh *fh)
36{
37 fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
38 if (fh->events == NULL)
39 return -ENOMEM;
40
41 init_waitqueue_head(&fh->events->wait);
42
43 INIT_LIST_HEAD(&fh->events->free);
44 INIT_LIST_HEAD(&fh->events->available);
45 INIT_LIST_HEAD(&fh->events->subscribed);
46
47 fh->events->sequence = -1;
48
49 return 0;
50}
51EXPORT_SYMBOL_GPL(v4l2_event_init);
52
53int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n) 35int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
54{ 36{
55 struct v4l2_events *events = fh->events;
56 unsigned long flags; 37 unsigned long flags;
57 38
58 if (!events) { 39 while (fh->nallocated < n) {
59 WARN_ON(1);
60 return -ENOMEM;
61 }
62
63 while (events->nallocated < n) {
64 struct v4l2_kevent *kev; 40 struct v4l2_kevent *kev;
65 41
66 kev = kzalloc(sizeof(*kev), GFP_KERNEL); 42 kev = kzalloc(sizeof(*kev), GFP_KERNEL);
@@ -68,8 +44,8 @@ int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
68 return -ENOMEM; 44 return -ENOMEM;
69 45
70 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 46 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
71 list_add_tail(&kev->list, &events->free); 47 list_add_tail(&kev->list, &fh->free);
72 events->nallocated++; 48 fh->nallocated++;
73 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 49 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
74 } 50 }
75 51
@@ -87,40 +63,31 @@ EXPORT_SYMBOL_GPL(v4l2_event_alloc);
87 63
88void v4l2_event_free(struct v4l2_fh *fh) 64void v4l2_event_free(struct v4l2_fh *fh)
89{ 65{
90 struct v4l2_events *events = fh->events; 66 list_kfree(&fh->free, struct v4l2_kevent, list);
91 67 list_kfree(&fh->available, struct v4l2_kevent, list);
92 if (!events)
93 return;
94
95 list_kfree(&events->free, struct v4l2_kevent, list);
96 list_kfree(&events->available, struct v4l2_kevent, list);
97 v4l2_event_unsubscribe_all(fh); 68 v4l2_event_unsubscribe_all(fh);
98
99 kfree(events);
100 fh->events = NULL;
101} 69}
102EXPORT_SYMBOL_GPL(v4l2_event_free); 70EXPORT_SYMBOL_GPL(v4l2_event_free);
103 71
104static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) 72static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
105{ 73{
106 struct v4l2_events *events = fh->events;
107 struct v4l2_kevent *kev; 74 struct v4l2_kevent *kev;
108 unsigned long flags; 75 unsigned long flags;
109 76
110 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 77 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
111 78
112 if (list_empty(&events->available)) { 79 if (list_empty(&fh->available)) {
113 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 80 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
114 return -ENOENT; 81 return -ENOENT;
115 } 82 }
116 83
117 WARN_ON(events->navailable == 0); 84 WARN_ON(fh->navailable == 0);
118 85
119 kev = list_first_entry(&events->available, struct v4l2_kevent, list); 86 kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
120 list_move(&kev->list, &events->free); 87 list_move(&kev->list, &fh->free);
121 events->navailable--; 88 fh->navailable--;
122 89
123 kev->event.pending = events->navailable; 90 kev->event.pending = fh->navailable;
124 *event = kev->event; 91 *event = kev->event;
125 92
126 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 93 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
@@ -131,7 +98,6 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
131int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, 98int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
132 int nonblocking) 99 int nonblocking)
133{ 100{
134 struct v4l2_events *events = fh->events;
135 int ret; 101 int ret;
136 102
137 if (nonblocking) 103 if (nonblocking)
@@ -142,8 +108,8 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
142 mutex_unlock(fh->vdev->lock); 108 mutex_unlock(fh->vdev->lock);
143 109
144 do { 110 do {
145 ret = wait_event_interruptible(events->wait, 111 ret = wait_event_interruptible(fh->wait,
146 events->navailable != 0); 112 fh->navailable != 0);
147 if (ret < 0) 113 if (ret < 0)
148 break; 114 break;
149 115
@@ -161,12 +127,11 @@ EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
161static struct v4l2_subscribed_event *v4l2_event_subscribed( 127static struct v4l2_subscribed_event *v4l2_event_subscribed(
162 struct v4l2_fh *fh, u32 type, u32 id) 128 struct v4l2_fh *fh, u32 type, u32 id)
163{ 129{
164 struct v4l2_events *events = fh->events;
165 struct v4l2_subscribed_event *sev; 130 struct v4l2_subscribed_event *sev;
166 131
167 assert_spin_locked(&fh->vdev->fh_lock); 132 assert_spin_locked(&fh->vdev->fh_lock);
168 133
169 list_for_each_entry(sev, &events->subscribed, list) { 134 list_for_each_entry(sev, &fh->subscribed, list) {
170 if (sev->type == type && sev->id == id) 135 if (sev->type == type && sev->id == id)
171 return sev; 136 return sev;
172 } 137 }
@@ -177,7 +142,6 @@ static struct v4l2_subscribed_event *v4l2_event_subscribed(
177static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev, 142static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
178 const struct timespec *ts) 143 const struct timespec *ts)
179{ 144{
180 struct v4l2_events *events = fh->events;
181 struct v4l2_subscribed_event *sev; 145 struct v4l2_subscribed_event *sev;
182 struct v4l2_kevent *kev; 146 struct v4l2_kevent *kev;
183 147
@@ -187,24 +151,24 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
187 return; 151 return;
188 152
189 /* Increase event sequence number on fh. */ 153 /* Increase event sequence number on fh. */
190 events->sequence++; 154 fh->sequence++;
191 155
192 /* Do we have any free events? */ 156 /* Do we have any free events? */
193 if (list_empty(&events->free)) 157 if (list_empty(&fh->free))
194 return; 158 return;
195 159
196 /* Take one and fill it. */ 160 /* Take one and fill it. */
197 kev = list_first_entry(&events->free, struct v4l2_kevent, list); 161 kev = list_first_entry(&fh->free, struct v4l2_kevent, list);
198 kev->event.type = ev->type; 162 kev->event.type = ev->type;
199 kev->event.u = ev->u; 163 kev->event.u = ev->u;
200 kev->event.id = ev->id; 164 kev->event.id = ev->id;
201 kev->event.timestamp = *ts; 165 kev->event.timestamp = *ts;
202 kev->event.sequence = events->sequence; 166 kev->event.sequence = fh->sequence;
203 list_move_tail(&kev->list, &events->available); 167 list_move_tail(&kev->list, &fh->available);
204 168
205 events->navailable++; 169 fh->navailable++;
206 170
207 wake_up_all(&events->wait); 171 wake_up_all(&fh->wait);
208} 172}
209 173
210void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) 174void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
@@ -240,24 +204,18 @@ EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
240 204
241int v4l2_event_pending(struct v4l2_fh *fh) 205int v4l2_event_pending(struct v4l2_fh *fh)
242{ 206{
243 return fh->events->navailable; 207 return fh->navailable;
244} 208}
245EXPORT_SYMBOL_GPL(v4l2_event_pending); 209EXPORT_SYMBOL_GPL(v4l2_event_pending);
246 210
247int v4l2_event_subscribe(struct v4l2_fh *fh, 211int v4l2_event_subscribe(struct v4l2_fh *fh,
248 struct v4l2_event_subscription *sub) 212 struct v4l2_event_subscription *sub)
249{ 213{
250 struct v4l2_events *events = fh->events;
251 struct v4l2_subscribed_event *sev, *found_ev; 214 struct v4l2_subscribed_event *sev, *found_ev;
252 struct v4l2_ctrl *ctrl = NULL; 215 struct v4l2_ctrl *ctrl = NULL;
253 struct v4l2_ctrl_fh *ctrl_fh = NULL; 216 struct v4l2_ctrl_fh *ctrl_fh = NULL;
254 unsigned long flags; 217 unsigned long flags;
255 218
256 if (fh->events == NULL) {
257 WARN_ON(1);
258 return -ENOMEM;
259 }
260
261 if (sub->type == V4L2_EVENT_CTRL) { 219 if (sub->type == V4L2_EVENT_CTRL) {
262 ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id); 220 ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
263 if (ctrl == NULL) 221 if (ctrl == NULL)
@@ -284,7 +242,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
284 sev->type = sub->type; 242 sev->type = sub->type;
285 sev->id = sub->id; 243 sev->id = sub->id;
286 244
287 list_add(&sev->list, &events->subscribed); 245 list_add(&sev->list, &fh->subscribed);
288 sev = NULL; 246 sev = NULL;
289 } 247 }
290 248
@@ -306,7 +264,6 @@ EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
306 264
307static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) 265static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
308{ 266{
309 struct v4l2_events *events = fh->events;
310 struct v4l2_event_subscription sub; 267 struct v4l2_event_subscription sub;
311 struct v4l2_subscribed_event *sev; 268 struct v4l2_subscribed_event *sev;
312 unsigned long flags; 269 unsigned long flags;
@@ -315,8 +272,8 @@ static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
315 sev = NULL; 272 sev = NULL;
316 273
317 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 274 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
318 if (!list_empty(&events->subscribed)) { 275 if (!list_empty(&fh->subscribed)) {
319 sev = list_first_entry(&events->subscribed, 276 sev = list_first_entry(&fh->subscribed,
320 struct v4l2_subscribed_event, list); 277 struct v4l2_subscribed_event, list);
321 sub.type = sev->type; 278 sub.type = sev->type;
322 sub.id = sev->id; 279 sub.id = sev->id;