/*
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
int v4l2_event_init(struct v4l2_fh *fh)
{
	fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
	if (fh->events == NULL)
		return -ENOMEM;

	init_waitqueue_head(&fh->events->wait);

	INIT_LIST_HEAD(&fh->events->free);
	INIT_LIST_HEAD(&fh->events->available);
	INIT_LIST_HEAD(&fh->events->subscribed);

	fh->events->sequence = -1;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_init);
int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
{
	struct v4l2_events *events = fh->events;
	unsigned long flags;

	while (events->nallocated < n) {
		struct v4l2_kevent *kev;

		kev = kzalloc(sizeof(*kev), GFP_KERNEL);
		if (kev == NULL)
			return -ENOMEM;
		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		list_add_tail(&kev->list, &events->free);
		events->nallocated++;
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_alloc);
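
/*
 * Usage sketch (not part of this file; the calling code is hypothetical):
 * a driver that wants to deliver events would typically set up the
 * per-file-handle event state from its open() handler and pick the
 * number of pre-allocated events itself, roughly:
 *
 *	ret = v4l2_event_init(fh);
 *	if (ret < 0)
 *		return ret;
 *	ret = v4l2_event_alloc(fh, 8);
 *	if (ret < 0)
 *		v4l2_event_free(fh);
 *	return ret;
 */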
#define list_kfree(list, type, member)				\
	while (!list_empty(list)) {				\
		type *hi;					\
		hi = list_first_entry(list, type, member);	\
		list_del(&hi->member);				\
		kfree(hi);					\
	}
void v4l2_event_free(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;

	if (!events)
		return;
	list_kfree(&events->free, struct v4l2_kevent, list);
	list_kfree(&events->available, struct v4l2_kevent, list);
	list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
	kfree(events);
	fh->events = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_event_free);
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	if (list_empty(&events->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(events->navailable == 0);
	kev = list_first_entry(&events->available, struct v4l2_kevent, list);
	list_move(&kev->list, &events->free);
	events->navailable--;

	kev->event.pending = events->navailable;
	*event = kev->event;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	return 0;
}
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	struct v4l2_events *events = fh->events;
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);
	do {
		ret = wait_event_interruptible(events->wait,
					       events->navailable != 0);
		if (ret < 0)
			return ret;
		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
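
/*
 * Note: this is the backend for VIDIOC_DQEVENT; the nonblocking argument
 * would normally be derived from the file's O_NONBLOCK flag, e.g.
 * "file->f_flags & O_NONBLOCK" in the caller.
 */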
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
	struct v4l2_fh *fh, u32 type)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &events->subscribed, list) {
		if (sev->type == type)
			return sev;
	}

	return NULL;
}
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list) {
		struct v4l2_events *events = fh->events;
		struct v4l2_kevent *kev;

		/* Are we subscribed? */
		if (!v4l2_event_subscribed(fh, ev->type))
			continue;

		/* Increase event sequence number on fh. */
		events->sequence++;

		/* Do we have any free events? */
		if (list_empty(&events->free))
			continue;

		/* Take one and fill it. */
		kev = list_first_entry(&events->free, struct v4l2_kevent, list);
		kev->event.type = ev->type;
		kev->event.u = ev->u;
		kev->event.timestamp = timestamp;
		kev->event.sequence = events->sequence;
		list_move_tail(&kev->list, &events->available);

		events->navailable++;

		wake_up_all(&events->wait);
	}

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
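
/*
 * Usage sketch (hypothetical driver code, event type assumed): only
 * spin_lock_irqsave() is taken above, so events can also be queued from
 * interrupt context, e.g.:
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_PRIVATE_START + 1,
 *	};
 *
 *	v4l2_event_queue(vdev, &ev);
 */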
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->events->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
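
/*
 * Usage sketch (hypothetical): a driver's poll() handler can report
 * POLLPRI when an event is waiting:
 *
 *	poll_wait(file, &fh->events->wait, wait);
 *	if (v4l2_event_pending(fh))
 *		return POLLPRI;
 */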
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (fh->events == NULL) {
		WARN_ON(1);
		return -ENOMEM;
	}

	sev = kmalloc(sizeof(*sev), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	if (v4l2_event_subscribed(fh, sub->type) == NULL) {
		INIT_LIST_HEAD(&sev->list);
		sev->type = sub->type;
		list_add(&sev->list, &events->subscribed);
		sev = NULL;
	}
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* If the type was already subscribed, drop the unused allocation. */
	kfree(sev);
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
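
/*
 * Usage sketch (hypothetical driver callback; the event type is made up):
 * a driver would normally validate the type in its subscribe callback and
 * then delegate to v4l2_event_subscribe():
 *
 *	static int mydrv_subscribe_event(struct v4l2_fh *fh,
 *					 struct v4l2_event_subscription *sub)
 *	{
 *		if (sub->type != V4L2_EVENT_PRIVATE_START + 1)
 *			return -EINVAL;
 *		return v4l2_event_subscribe(fh, sub);
 *	}
 */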
static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_events *events = fh->events;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;
		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&events->subscribed)) {
			sev = list_first_entry(&events->subscribed,
					struct v4l2_subscribed_event, list);
			list_del(&sev->list);
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		kfree(sev);
	} while (sev);
}
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	sev = v4l2_event_subscribed(fh, sub->type);
	if (sev != NULL)
		list_del(&sev->list);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	kfree(sev);
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
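
/*
 * Usage sketch (hypothetical): unsubscription is usually forwarded
 * straight from the driver's unsubscribe callback:
 *
 *	static int mydrv_unsubscribe_event(struct v4l2_fh *fh,
 *					   struct v4l2_event_subscription *sub)
 *	{
 *		return v4l2_event_unsubscribe(fh, sub);
 *	}
 */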