/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
20
21 #include "ivtv-driver.h"
22 #include "ivtv-queue.h"
23 #include "ivtv-udma.h"
24 #include "ivtv-irq.h"
25 #include "ivtv-mailbox.h"
26 #include "ivtv-vbi.h"
27 #include "ivtv-yuv.h"
28
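/* Magic value written at the start of an encoder DMA transfer. dma_post()
   searches for it to locate where the transferred data actually starts
   within the first buffer; the word it overwrites is saved beforehand and
   restored afterwards. */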
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

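/* Maps the stream type reported by the encoder firmware in data[0] of an
   ENC START CAP mailbox to the corresponding ivtv stream. */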
static const int ivtv_stream_map[] = {
        IVTV_ENC_STREAM_TYPE_MPG,
        IVTV_ENC_STREAM_TYPE_YUV,
        IVTV_ENC_STREAM_TYPE_PCM,
        IVTV_ENC_STREAM_TYPE_VBI,
};

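/* Copy the data of a pending PIO transfer from card memory into the buffers
   queued on q_dma, then raise the PIO-complete interrupt so the normal
   completion path runs. This runs from the IRQ work queue rather than the
   interrupt handler itself, presumably because the memcpy_fromio() copies
   are too slow for interrupt context. */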
static void ivtv_pio_work_handler(struct ivtv *itv)
{
        struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
        struct list_head *p;
        int i = 0;

        IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
                        s->v4l2dev == NULL || !ivtv_use_pio(s)) {
                itv->cur_pio_stream = -1;
                /* trigger PIO complete user interrupt */
                write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
                return;
        }
        IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
        list_for_each(p, &s->q_dma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                u32 size = s->sg_processing[i].size & 0x3ffff;

                /* Copy the data from the card to the buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                        memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
                }
                else {
                        memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
                }
                i++;
                if (i == s->sg_processing_size)
                        break;
        }
        write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

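/* Deferred work scheduled from the interrupt handler: run whichever of the
   PIO, VBI and YUV handlers the IRQ flagged via the IVTV_F_I_WORK_HANDLER_*
   bits. */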
void ivtv_irq_work_handler(struct work_struct *work)
{
        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                ivtv_pio_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
                ivtv_vbi_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
                ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 bytes_needed = 0;
        u32 offset, size;
        u32 UVoffset = 0, UVsize = 0;
        int skip_bufs = s->q_predma.buffers;
        int idx = s->sg_pending_size;
        int rc;

        /* sanity checks */
        if (s->v4l2dev == NULL) {
                IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
                return -1;
        }
        if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
                return -1;
        }

        /* determine offset, size and PTS for the various streams */
        switch (s->type) {
                case IVTV_ENC_STREAM_TYPE_MPG:
                        offset = data[1];
                        size = data[2];
                        s->pending_pts = 0;
                        break;

                case IVTV_ENC_STREAM_TYPE_YUV:
                        offset = data[1];
                        size = data[2];
                        UVoffset = data[3];
                        UVsize = data[4];
                        s->pending_pts = ((u64) data[5] << 32) | data[6];
                        break;

                case IVTV_ENC_STREAM_TYPE_PCM:
                        offset = data[1] + 12;
                        size = data[2] - 12;
                        s->pending_pts = read_dec(offset - 8) |
                                ((u64)(read_dec(offset - 12)) << 32);
                        if (itv->has_cx23415)
                                offset += IVTV_DECODER_OFFSET;
                        break;

                case IVTV_ENC_STREAM_TYPE_VBI:
                        size = itv->vbi.enc_size * itv->vbi.fpi;
                        offset = read_enc(itv->vbi.enc_start - 4) + 12;
                        if (offset == 12) {
                                IVTV_DEBUG_INFO("VBI offset == 0\n");
                                return -1;
                        }
                        s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
                        break;

                case IVTV_DEC_STREAM_TYPE_VBI:
                        size = read_dec(itv->vbi.dec_start + 4) + 8;
                        offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
                        s->pending_pts = 0;
                        offset += IVTV_DECODER_OFFSET;
                        break;
                default:
                        /* shouldn't happen */
                        return -1;
        }

        /* if this is the start of the DMA then fill in the magic cookie */
        if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
                if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                        s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
                        write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
                }
                else {
                        s->pending_backup = read_enc(offset);
                        write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
                }
                s->pending_offset = offset;
        }

        bytes_needed = size;
        if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* The size for the Y samples needs to be rounded upwards to a
                   multiple of the buf_size. The UV samples then start in the
                   next buffer. */
                bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
                bytes_needed += UVsize;
        }

        IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
                ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

        rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
        if (rc < 0) { /* Insufficient buffers */
                IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
                                bytes_needed, s->name);
                return -1;
        }
        if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
                IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
                IVTV_WARN("Cause: the application is not reading fast enough.\n");
        }
        s->buffers_stolen = rc;

        /* got the buffers, now fill in sg_pending */
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        memset(buf->buf, 0, 128);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                if (skip_bufs-- > 0)
                        continue;
                s->sg_pending[idx].dst = buf->dma_handle;
                s->sg_pending[idx].src = offset;
                s->sg_pending[idx].size = s->buf_size;
                buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                s->q_predma.bytesused += buf->bytesused;
                size -= buf->bytesused;
                offset += s->buf_size;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);

                if (size == 0) {        /* YUV */
                        /* process the UV section */
                        offset = UVoffset;
                        size = UVsize;
                }
                idx++;
        }
        s->sg_pending_size = idx;
        return 0;
}

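/* Completion handler for an encoder transfer: sync the buffers for the CPU,
   use the magic cookie to find where the data actually starts in the first
   buffer, restore the word the cookie overwrote, and move the filled buffers
   to q_full (or hand VBI data to the VBI parser). */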
static void dma_post(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf = NULL;
        struct list_head *p;
        u32 offset;
        u32 *u32buf;
        int x = 0;

        IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
                        s->name, s->dma_offset);
        list_for_each(p, &s->q_dma.list) {
                buf = list_entry(p, struct ivtv_buffer, list);
                u32buf = (u32 *)buf->buf;

                /* Sync Buffer */
                ivtv_buf_sync_for_cpu(s, buf);

                if (x == 0 && ivtv_use_dma(s)) {
                        offset = s->dma_last_offset;
                        if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
                                for (offset = 0; offset < 64; offset++) {
                                        if (u32buf[offset] == DMA_MAGIC_COOKIE) {
                                                break;
                                        }
                                }
                                offset *= 4;
                                if (offset == 256) {
                                        IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
                                        offset = s->dma_last_offset;
                                }
                                if (s->dma_last_offset != offset)
                                        IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
                                s->dma_last_offset = offset;
                        }
                        if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                                                s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                                write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
                        }
                        else {
                                write_enc_sync(0, s->dma_offset);
                        }
                        if (offset) {
                                buf->bytesused -= offset;
                                /* source and destination overlap, so memmove is required */
                                memmove(buf->buf, buf->buf + offset, buf->bytesused + offset);
                        }
                        *u32buf = cpu_to_le32(s->dma_backup);
                }
                x++;
                /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                    s->type == IVTV_ENC_STREAM_TYPE_VBI)
                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
        }
        if (buf)
                buf->bytesused += s->dma_last_offset;
        if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                list_for_each(p, &s->q_dma.list) {
                        buf = list_entry(p, struct ivtv_buffer, list);

                        /* Parse and Groom VBI Data */
                        s->q_dma.bytesused -= buf->bytesused;
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
                if (s->id == -1) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
        if (s->id != -1)
                wake_up(&s->waitq);
}

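/* Build the scatter-gather list for a host-to-decoder transfer of the
   buffers queued on q_predma, then either start the decoder DMA directly or
   mark the stream as pending if another DMA is already in flight. */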
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
        struct ivtv *itv = s->itv;
        struct list_head *p;
        u32 y_size = itv->params.height * itv->params.width;
        u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
        int y_done = 0;
        int bytes_written = 0;
        unsigned long flags = 0;
        int idx = 0;

        IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                /* YUV UV Offset from Y Buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
                        offset = uv_offset;
                        y_done = 1;
                }
                s->sg_pending[idx].src = buf->dma_handle;
                s->sg_pending[idx].dst = offset;
                s->sg_pending[idx].size = buf->bytesused;

                offset += buf->bytesused;
                bytes_written += buf->bytesused;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);
                idx++;
        }
        s->sg_pending_size = idx;

        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        if (lock)
                spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                ivtv_dma_dec_start(s);
        }
        else {
                set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
        if (lock)
                spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

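/* Program the next scatter-gather element into the hardware SG list and
   start the transfer: elements are handed to the hardware one at a time,
   and the DMA-complete interrupt chains to the next one. Setting bit 0x02
   (0x01 for the decoder) of IVTV_REG_DMAXFER kicks the engine; bit 31 of
   the size presumably flags the end of the hardware list. */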
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
        int i;

        IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

        if (ivtv_use_dma(s))
                s->sg_pending[s->sg_pending_size - 1].size += 256;

        /* If this is an MPEG stream, and VBI data is also pending, then append the
           VBI DMA to the MPEG DMA and transfer both sets of data at once.

           VBI DMA is a second class citizen compared to MPEG and mixing them together
           will confuse the firmware (the end of a VBI DMA is seen as the end of an
           MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
           sure the MPEG DMA is only used to carry the VBI DMA when both are in
           use. This way no conflicts occur. */
        clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
                        s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
                ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
                if (ivtv_use_dma(s_vbi))
                        s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
                for (i = 0; i < s_vbi->sg_pending_size; i++) {
                        s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
                }
                s_vbi->dma_offset = s_vbi->pending_offset;
                s_vbi->sg_pending_size = 0;
                s_vbi->dma_xfer_cnt++;
                set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
        }

        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;
        s->dma_offset = s->pending_offset;
        s->dma_backup = s->pending_backup;
        s->dma_pts = s->pending_pts;

        if (ivtv_use_pio(s)) {
                set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                set_bit(IVTV_F_I_PIO, &itv->i_flags);
                itv->cur_pio_stream = s->type;
        }
        else {
                itv->dma_retries = 0;
                ivtv_dma_enc_start_xfer(s);
                set_bit(IVTV_F_I_DMA, &itv->i_flags);
                itv->cur_dma_stream = s->type;
                itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
                add_timer(&itv->dma_timer);
        }
}

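/* start the decoder DMA: the same bookkeeping as the encoder path above,
   minus the PIO and VBI piggyback special cases. */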
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;

        IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
        itv->dma_retries = 0;
        ivtv_dma_dec_start_xfer(s);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = s->type;
        itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
        add_timer(&itv->dma_timer);
}

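/* A host-to-card transfer completed: for decoder stream DMA, check for
   errors and retries, chain the next SG element if one remains, then tell
   the firmware how much was transferred and recycle the buffers. User DMA
   (UDMA) completion also lands here and only needs the flags cleared. */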
static void ivtv_irq_dma_read(struct ivtv *itv)
{
        struct ivtv_stream *s = NULL;
        struct ivtv_buffer *buf;
        int hw_stream_type = 0;

        IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }

        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                s = &itv->streams[itv->cur_dma_stream];
                ivtv_stream_sync_for_cpu(s);

                if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
                        IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
                                        read_reg(IVTV_REG_DMASTATUS),
                                        s->sg_processed, s->sg_processing_size, itv->dma_retries);
                        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                        if (itv->dma_retries == 3) {
                                /* Too many retries, give up on this frame */
                                itv->dma_retries = 0;
                                s->sg_processed = s->sg_processing_size;
                        }
                        else {
                                /* Retry, starting with the first xfer segment.
                                   Just retrying the current segment is not sufficient. */
                                s->sg_processed = 0;
                                itv->dma_retries++;
                        }
                }
                if (s->sg_processed < s->sg_processing_size) {
                        /* DMA next buffer */
                        ivtv_dma_dec_start_xfer(s);
                        return;
                }
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
                        hw_stream_type = 2;
                IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

                /* The firmware must be kicked here, just as in PIO mode. This
                   presumably tells it that we are done and how large the transfer
                   was, so that it can work out what to request next. We could
                   probably do this part ourselves, but then we would have to
                   compute the full transfer details without relying on interrupts.
                 */
                ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
                                hw_stream_type);

                /* Free last DMA call */
                while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
                        ivtv_buf_sync_for_cpu(s, buf);
                        ivtv_enqueue(s, buf, &s->q_free);
                }
                wake_up(&s->waitq);
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

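/* An encoder (card-to-host) transfer completed: check the status word from
   the DMA-end mailbox, retry on error, chain the next SG element, and once
   the whole list is done run dma_post() for the stream (and for any VBI
   data that was piggybacked onto an MPEG transfer). */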
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
        if (itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }
        s = &itv->streams[itv->cur_dma_stream];
        ivtv_stream_sync_for_cpu(s);

        if (data[0] & 0x18) {
                IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
                        s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
                write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                if (itv->dma_retries == 3) {
                        /* Too many retries, give up on this frame */
                        itv->dma_retries = 0;
                        s->sg_processed = s->sg_processing_size;
                }
                else {
                        /* Retry, starting with the first xfer segment.
                           Just retrying the current segment is not sufficient. */
                        s->sg_processed = 0;
                        itv->dma_retries++;
                }
        }
        if (s->sg_processed < s->sg_processing_size) {
                /* DMA next buffer */
                ivtv_dma_enc_start_xfer(s);
                return;
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        dma_post(s);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        s->sg_processing_size = 0;
        s->sg_processed = 0;
        wake_up(&itv->dma_waitq);
}

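/* A PIO transfer completed: the data was already copied in by
   ivtv_pio_work_handler(), so just run dma_post() and ask the firmware to
   schedule the next transfer for this stream type. */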
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
        struct ivtv_stream *s;

        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
                itv->cur_pio_stream = -1;
                return;
        }
        s = &itv->streams[itv->cur_pio_stream];
        IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        itv->cur_pio_stream = -1;
        dma_post(s);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
        else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
        else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        wake_up(&itv->dma_waitq);
}

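/* A DMA error interrupt: reset the status register, then retry the stream
   transfer that was in flight, or restart user DMA if that is what failed. */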
static void ivtv_irq_dma_err(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];

        del_timer(&itv->dma_timer);
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
                                read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

                /* retry */
                if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                        ivtv_dma_dec_start(s);
                else
                        ivtv_dma_enc_start(s);
                return;
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
                return;
        }
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

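/* The encoder has data ready to transfer: data[0] is the stream type
   (0 = MPG, 1 = YUV, 2 = PCM, via ivtv_stream_map), data[1] the offset in
   card memory and data[2] the size, matching the decoding done in
   stream_enc_dma_append(). */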
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* Get DMA destination and size arguments from card */
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
        IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

        if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
                IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
                                data[0], data[1], data[2]);
                return;
        }
        s = &itv->streams[ivtv_stream_map[data[0]]];
        if (!stream_enc_dma_append(s, data)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
        struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
        /* data[] is not read for VBI streams: stream_enc_dma_append()
           derives the offset and size from itv->vbi instead */
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

        /* If more than two VBI buffers are pending, then
           clear the old ones and start with this new one.
           This can happen during transition stages when MPEG capturing is
           started, but the first interrupts haven't arrived yet. During
           that period VBI requests can accumulate without being able to
           DMA the data. Since at most four VBI DMA buffers are available,
           we just drop the old requests when there are already three
           requests queued. */
        if (s->sg_pending_size > 2) {
                struct list_head *p;
                list_for_each(p, &s->q_predma.list) {
                        struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                        ivtv_buf_sync_for_cpu(s, buf);
                }
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
                s->sg_pending_size = 0;
        }
        /* if we can append the data, and the MPEG stream isn't capturing,
           then start a DMA request for just the VBI data. */
        if (!stream_enc_dma_append(s, data) &&
                        !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

        IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
        if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
                        !stream_enc_dma_append(s, data)) {
                set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
        }
}

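/* The decoder wants more MPEG or YUV data: work out how much it needs and
   where it should go, and start a host-to-decoder transfer if enough data
   is queued; otherwise flag the stream as needing data. */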
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* YUV or MPG */
        ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
                itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
                itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
        }
        else {
                itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
                itv->dma_data_req_offset = data[1];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
        }
        IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
                       itv->dma_data_req_offset, itv->dma_data_req_size);
        if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
                set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
        }
        else {
                clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
                ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
                ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
        }
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
        unsigned int frame = read_reg(0x28c0) & 1;
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
                ((itv->last_vsync_frame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
                        (frame != (itv->last_vsync_frame & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        if (frame != (itv->last_vsync_frame & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->last_vsync_frame += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame) {
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if (last_dma_frame != -1 &&
                    (itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update)) {
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed++;
        }
}

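/* Interrupts that may kick off a new stream DMA in the round-robin
   scheduling loops at the end of ivtv_irq_handler(). */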
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->last_vsync_frame & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->eos_waitq);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

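/* DMA watchdog, run when itv->dma_timer expires (the timer is armed for
   100 ms whenever a transfer starts): log the timeout, reset the DMA status
   bits and release anyone waiting on the transfer. */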
void ivtv_unfinished_dma(unsigned long arg)
{
        struct ivtv *itv = (struct ivtv *)arg;

        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                return;
        IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}