sound/core/seq/seq_fifo.c
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

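/*
 * The FIFO is a singly-linked list of snd_seq_event_cell entries drawn
 * from a per-FIFO memory pool.  f->lock protects the head/tail pointers
 * and the cell count, f->use_lock lets snd_seq_fifo_clear() wait for
 * enqueues that are still in flight, and readers that find the FIFO
 * empty sleep on f->input_sleep until a writer wakes them up.
 */
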
/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
        struct snd_seq_fifo *f;

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (f == NULL) {
                snd_printd("malloc failed for snd_seq_fifo_new()\n");
                return NULL;
        }

        f->pool = snd_seq_pool_new(poolsize);
        if (f->pool == NULL) {
                kfree(f);
                return NULL;
        }
        if (snd_seq_pool_init(f->pool) < 0) {
                snd_seq_pool_delete(&f->pool);
                kfree(f);
                return NULL;
        }

        spin_lock_init(&f->lock);
        snd_use_lock_init(&f->use_lock);
        init_waitqueue_head(&f->input_sleep);
        atomic_set(&f->overflow, 0);

        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;

        return f;
}

/* delete the fifo and release all resources */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
        struct snd_seq_fifo *f;

        snd_assert(fifo != NULL, return);
        f = *fifo;
        snd_assert(f != NULL, return);
        *fifo = NULL;

        snd_seq_fifo_clear(f);

        /* wake up clients if any */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        /* release the pool */
        if (f->pool) {
                snd_seq_pool_done(f->pool);
                snd_seq_pool_delete(&f->pool);
        }

        kfree(f);
}

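/*
 * Illustrative sketch only (not part of the driver, compiled out): how a
 * caller might typically drive this FIFO.  The function name
 * demo_fifo_roundtrip and the event contents are hypothetical.
 */
#if 0
static int demo_fifo_roundtrip(void)
{
        struct snd_seq_fifo *f;
        struct snd_seq_event ev;
        struct snd_seq_event_cell *cell;
        int err;

        f = snd_seq_fifo_new(64);               /* pool of 64 cells */
        if (f == NULL)
                return -ENOMEM;

        memset(&ev, 0, sizeof(ev));
        ev.type = SNDRV_SEQ_EVENT_NOTEON;
        err = snd_seq_fifo_event_in(f, &ev);    /* copies ev into a cell */
        if (err < 0)
                goto out;

        err = snd_seq_fifo_cell_out(f, &cell, 1); /* non-blocking dequeue */
        if (err < 0)
                goto out;
        /* ... use cell->event ... */
        snd_seq_cell_free(cell);
out:
        snd_seq_fifo_delete(&f);
        return err;
}
#endif
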
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;

        /* clear overflow flag */
        atomic_set(&f->overflow, 0);

        snd_use_lock_sync(&f->use_lock);
        spin_lock_irqsave(&f->lock, flags);
        /* drain the fifo */
        while ((cell = fifo_cell_out(f)) != NULL) {
                snd_seq_cell_free(cell);
        }
        spin_unlock_irqrestore(&f->lock, flags);
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
                          struct snd_seq_event *event)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err;

        snd_assert(f != NULL, return -EINVAL);

        snd_use_lock_use(&f->use_lock);
        err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
        if (err < 0) {
                if (err == -ENOMEM)
                        atomic_inc(&f->overflow);
                snd_use_lock_free(&f->use_lock);
                return err;
        }

        /* append the new cell to the fifo */
        spin_lock_irqsave(&f->lock, flags);
        if (f->tail != NULL)
                f->tail->next = cell;
        f->tail = cell;
        if (f->head == NULL)
                f->head = cell;
        f->cells++;
        spin_unlock_irqrestore(&f->lock, flags);

        /* wake up a waiting reader, if any */
        if (waitqueue_active(&f->input_sleep))
                wake_up(&f->input_sleep);

        snd_use_lock_free(&f->use_lock);

        return 0; /* success */
}

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
        struct snd_seq_event_cell *cell;

        if ((cell = f->head) != NULL) {
                f->head = cell->next;

                /* reset tail if this was the last element */
                if (f->tail == cell)
                        f->tail = NULL;

                cell->next = NULL;
                f->cells--;
        }

        return cell;
}

/* dequeue a cell from the fifo; the caller copies it to user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
                          struct snd_seq_event_cell **cellp, int nonblock)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        wait_queue_t wait;

        snd_assert(f != NULL, return -EINVAL);

        *cellp = NULL;
        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&f->lock, flags);
        while ((cell = fifo_cell_out(f)) == NULL) {
                if (nonblock) {
                        /* non-blocking - return immediately */
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -EAGAIN;
                }
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&f->input_sleep, &wait);
                spin_unlock_irq(&f->lock);
                schedule();
                spin_lock_irq(&f->lock);
                remove_wait_queue(&f->input_sleep, &wait);
                if (signal_pending(current)) {
                        spin_unlock_irqrestore(&f->lock, flags);
                        return -ERESTARTSYS;
                }
        }
        spin_unlock_irqrestore(&f->lock, flags);
        *cellp = cell;

        return 0;
}


/* put a dequeued cell back at the head of the fifo */
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
                               struct snd_seq_event_cell *cell)
{
        unsigned long flags;

        if (cell) {
                spin_lock_irqsave(&f->lock, flags);
                cell->next = f->head;
                f->head = cell;
                f->cells++;
                spin_unlock_irqrestore(&f->lock, flags);
        }
}

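/*
 * Illustrative sketch only (not part of the driver, compiled out): the
 * dequeue/copy/putback pattern the two functions above are meant for.
 * demo_read_one and buf are hypothetical names.
 */
#if 0
static int demo_read_one(struct snd_seq_fifo *f,
                         struct snd_seq_event __user *buf)
{
        struct snd_seq_event_cell *cell;
        int err;

        err = snd_seq_fifo_cell_out(f, &cell, 0);       /* may block */
        if (err < 0)
                return err;
        if (copy_to_user(buf, &cell->event, sizeof(cell->event))) {
                /* copy failed: put the event back so it is not lost */
                snd_seq_fifo_cell_putback(f, cell);
                return -EFAULT;
        }
        snd_seq_cell_free(cell);
        return 0;
}
#endif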

/* polling; return non-zero if the fifo has events to read */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &f->input_sleep, wait);
        return (f->cells > 0);
}

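/*
 * Illustrative sketch only (not part of the driver, compiled out): a poll
 * file operation would typically turn the non-zero return value into
 * readable poll flags.  demo_poll is a hypothetical name.
 */
#if 0
static unsigned int demo_poll(struct file *file, poll_table *wait,
                              struct snd_seq_fifo *f)
{
        unsigned int mask = 0;

        if (snd_seq_fifo_poll_wait(f, file, wait))
                mask |= POLLIN | POLLRDNORM;
        return mask;
}
#endif
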
/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
        unsigned long flags;
        struct snd_seq_pool *newpool, *oldpool;
        struct snd_seq_event_cell *cell, *next, *oldhead;

        snd_assert(f != NULL && f->pool != NULL, return -EINVAL);

        /* allocate new pool */
        newpool = snd_seq_pool_new(poolsize);
        if (newpool == NULL)
                return -ENOMEM;
        if (snd_seq_pool_init(newpool) < 0) {
                snd_seq_pool_delete(&newpool);
                return -ENOMEM;
        }

        spin_lock_irqsave(&f->lock, flags);
        /* remember old pool */
        oldpool = f->pool;
        oldhead = f->head;
        /* exchange pools */
        f->pool = newpool;
        f->head = NULL;
        f->tail = NULL;
        f->cells = 0;
        /* NOTE: overflow flag is not cleared */
        spin_unlock_irqrestore(&f->lock, flags);

        /* release cells in old pool */
        for (cell = oldhead; cell; cell = next) {
                next = cell->next;
                snd_seq_cell_free(cell);
        }
        snd_seq_pool_delete(&oldpool);

        return 0;
}