drivers/media/video/v4l2-event.c
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>

#include <linux/sched.h>
#include <linux/slab.h>

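/*
 * Convert a logical index into a subscription's event ring buffer into the
 * physical slot in sev->events, wrapping around at sev->elems.
 */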
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

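/*
 * Remove the oldest pending event from the file handle and copy it to
 * 'event'. Returns -ENOENT if nothing is available. Takes fh->vdev->fh_lock.
 */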
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

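/*
 * Dequeue one event for the file handle. In blocking mode the vdev lock (if
 * any) is released while waiting for an event to arrive, then re-acquired.
 */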
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list) {
		if (sev->type == type && sev->id == id)
			return sev;
	}

	return NULL;
}

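/*
 * Queue one event on a single file handle, provided that handle has
 * subscribed to it. When the subscription's ring buffer is full, the oldest
 * event is dropped and, if present, the replace/merge callbacks fold its
 * payload into the new or second-oldest event. Caller must hold
 * fh->vdev->fh_lock.
 */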
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->replace) {
				sev->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

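/*
 * Queue an event on every file handle currently open on the video device.
 * A single timestamp is taken so all handles see the same event time.
 */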
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list) {
		__v4l2_event_queue_fh(fh, ev, &timestamp);
	}

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

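/* Queue an event on a single file handle only. */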
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

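/* Return the number of pending events on the file handle. */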
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

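/*
 * Replace the payload of a pending control event with the new one, but keep
 * the union of both 'changes' masks so no change notification is lost.
 */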
static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
{
	u32 old_changes = old->u.ctrl.changes;

	old->u.ctrl = new->u.ctrl;
	old->u.ctrl.changes |= old_changes;
}

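/* Fold the 'changes' mask of the dropped oldest event into the next one. */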
static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
	new->u.ctrl.changes |= old->u.ctrl.changes;
}

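/*
 * Subscribe the file handle to an event type/id, allocating room for 'elems'
 * queued events (at least one). Control events additionally get the
 * replace/merge callbacks and are registered with the control framework.
 */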
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub, unsigned elems)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	struct v4l2_ctrl *ctrl = NULL;
	unsigned long flags;
	unsigned i;

	if (elems < 1)
		elems = 1;
	if (sub->type == V4L2_EVENT_CTRL) {
		ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
		if (ctrl == NULL)
			return -EINVAL;
	}

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->elems = elems;
	if (ctrl) {
		sev->replace = ctrls_replace;
		sev->merge = ctrls_merge;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
	if (found_ev)
		kfree(sev);
	else if (ctrl)
		v4l2_ctrl_add_event(ctrl, sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

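/* Drop every subscription on the file handle, one at a time. */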
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

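/*
 * Unsubscribe from one event type/id, or from everything if the type is
 * V4L2_EVENT_ALL. Control events are also removed from the control framework
 * before the subscription is freed.
 */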
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		list_del(&sev->list);
		sev->fh = NULL;
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	if (sev && sev->type == V4L2_EVENT_CTRL) {
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);

		if (ctrl)
			v4l2_ctrl_del_event(ctrl, sev);
	}

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);