2 * Driver for the Conexant CX23885 PCIe bridge
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
35 #include "cx23888-ir.h"
36 #include "cx23885-ir.h"
37 #include "cx23885-av.h"
38 #include "cx23885-input.h"
40 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
41 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
42 MODULE_LICENSE("GPL");
44 static unsigned int debug;
45 module_param(debug, int, 0644);
46 MODULE_PARM_DESC(debug, "enable debug messages");
48 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
49 module_param_array(card, int, NULL, 0444);
50 MODULE_PARM_DESC(card, "card type");
52 #define dprintk(level, fmt, arg...)\
53 do { if (debug >= level)\
54 printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
57 static unsigned int cx23885_devcount;
59 #define NO_SYNC_LINE (-1U)
61 /* FIXME, these allocations will change when
62 * analog arrives. The be reviewed.
64 * 1 line = 16 bytes of CDT
66 * cdt size = 16 * linesize
71 * 0x00000000 0x00008fff FIFO clusters
72 * 0x00010000 0x000104af Channel Management Data Structures
73 * 0x000104b0 0x000104ff Free
74 * 0x00010500 0x000108bf 15 channels * iqsize
75 * 0x000108c0 0x000108ff Free
76 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
77 * 15 channels * (iqsize + (maxlines * linesize))
78 * 0x00010ea0 0x00010xxx Free
81 static struct sram_channel cx23885_sram_channels[] = {
84 .cmds_start = 0x10000,
85 .ctrl_start = 0x10380,
89 .ptr1_reg = DMA1_PTR1,
90 .ptr2_reg = DMA1_PTR2,
91 .cnt1_reg = DMA1_CNT1,
92 .cnt2_reg = DMA1_CNT2,
101 .ptr1_reg = DMA2_PTR1,
102 .ptr2_reg = DMA2_PTR2,
103 .cnt1_reg = DMA2_CNT1,
104 .cnt2_reg = DMA2_CNT2,
108 .cmds_start = 0x100A0,
109 .ctrl_start = 0x10400,
111 .fifo_start = 0x5000,
113 .ptr1_reg = DMA3_PTR1,
114 .ptr2_reg = DMA3_PTR2,
115 .cnt1_reg = DMA3_CNT1,
116 .cnt2_reg = DMA3_CNT2,
125 .ptr1_reg = DMA4_PTR1,
126 .ptr2_reg = DMA4_PTR2,
127 .cnt1_reg = DMA4_CNT1,
128 .cnt2_reg = DMA4_CNT2,
137 .ptr1_reg = DMA5_PTR1,
138 .ptr2_reg = DMA5_PTR2,
139 .cnt1_reg = DMA5_CNT1,
140 .cnt2_reg = DMA5_CNT2,
144 .cmds_start = 0x10140,
145 .ctrl_start = 0x10440,
147 .fifo_start = 0x6000,
149 .ptr1_reg = DMA5_PTR1,
150 .ptr2_reg = DMA5_PTR2,
151 .cnt1_reg = DMA5_CNT1,
152 .cnt2_reg = DMA5_CNT2,
161 .ptr1_reg = DMA6_PTR1,
162 .ptr2_reg = DMA6_PTR2,
163 .cnt1_reg = DMA6_CNT1,
164 .cnt2_reg = DMA6_CNT2,
173 .ptr1_reg = DMA7_PTR1,
174 .ptr2_reg = DMA7_PTR2,
175 .cnt1_reg = DMA7_CNT1,
176 .cnt2_reg = DMA7_CNT2,
185 .ptr1_reg = DMA8_PTR1,
186 .ptr2_reg = DMA8_PTR2,
187 .cnt1_reg = DMA8_CNT1,
188 .cnt2_reg = DMA8_CNT2,
192 static struct sram_channel cx23887_sram_channels[] = {
195 .cmds_start = 0x10000,
196 .ctrl_start = 0x105b0,
200 .ptr1_reg = DMA1_PTR1,
201 .ptr2_reg = DMA1_PTR2,
202 .cnt1_reg = DMA1_CNT1,
203 .cnt2_reg = DMA1_CNT2,
212 .ptr1_reg = DMA2_PTR1,
213 .ptr2_reg = DMA2_PTR2,
214 .cnt1_reg = DMA2_CNT1,
215 .cnt2_reg = DMA2_CNT2,
219 .cmds_start = 0x100A0,
220 .ctrl_start = 0x10630,
222 .fifo_start = 0x5000,
224 .ptr1_reg = DMA3_PTR1,
225 .ptr2_reg = DMA3_PTR2,
226 .cnt1_reg = DMA3_CNT1,
227 .cnt2_reg = DMA3_CNT2,
236 .ptr1_reg = DMA4_PTR1,
237 .ptr2_reg = DMA4_PTR2,
238 .cnt1_reg = DMA4_CNT1,
239 .cnt2_reg = DMA4_CNT2,
248 .ptr1_reg = DMA5_PTR1,
249 .ptr2_reg = DMA5_PTR2,
250 .cnt1_reg = DMA5_CNT1,
251 .cnt2_reg = DMA5_CNT2,
255 .cmds_start = 0x10140,
256 .ctrl_start = 0x10670,
258 .fifo_start = 0x6000,
260 .ptr1_reg = DMA5_PTR1,
261 .ptr2_reg = DMA5_PTR2,
262 .cnt1_reg = DMA5_CNT1,
263 .cnt2_reg = DMA5_CNT2,
272 .ptr1_reg = DMA6_PTR1,
273 .ptr2_reg = DMA6_PTR2,
274 .cnt1_reg = DMA6_CNT1,
275 .cnt2_reg = DMA6_CNT2,
284 .ptr1_reg = DMA7_PTR1,
285 .ptr2_reg = DMA7_PTR2,
286 .cnt1_reg = DMA7_CNT1,
287 .cnt2_reg = DMA7_CNT2,
296 .ptr1_reg = DMA8_PTR1,
297 .ptr2_reg = DMA8_PTR2,
298 .cnt1_reg = DMA8_CNT1,
299 .cnt2_reg = DMA8_CNT2,
303 void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
306 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
308 dev->pci_irqmask |= mask;
310 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
313 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
316 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
318 dev->pci_irqmask |= mask;
319 cx_set(PCI_INT_MSK, mask);
321 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
324 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
328 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
330 v = mask & dev->pci_irqmask;
332 cx_set(PCI_INT_MSK, v);
334 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
337 static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
339 cx23885_irq_enable(dev, 0xffffffff);
342 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
345 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
347 cx_clear(PCI_INT_MSK, mask);
349 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
352 static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
354 cx23885_irq_disable(dev, 0xffffffff);
357 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
360 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
362 dev->pci_irqmask &= ~mask;
363 cx_clear(PCI_INT_MSK, mask);
365 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
368 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
372 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
374 v = cx_read(PCI_INT_MSK);
376 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
380 static int cx23885_risc_decode(u32 risc)
382 static char *instr[16] = {
383 [RISC_SYNC >> 28] = "sync",
384 [RISC_WRITE >> 28] = "write",
385 [RISC_WRITEC >> 28] = "writec",
386 [RISC_READ >> 28] = "read",
387 [RISC_READC >> 28] = "readc",
388 [RISC_JUMP >> 28] = "jump",
389 [RISC_SKIP >> 28] = "skip",
390 [RISC_WRITERM >> 28] = "writerm",
391 [RISC_WRITECM >> 28] = "writecm",
392 [RISC_WRITECR >> 28] = "writecr",
394 static int incr[16] = {
395 [RISC_WRITE >> 28] = 3,
396 [RISC_JUMP >> 28] = 3,
397 [RISC_SKIP >> 28] = 1,
398 [RISC_SYNC >> 28] = 1,
399 [RISC_WRITERM >> 28] = 3,
400 [RISC_WRITECM >> 28] = 3,
401 [RISC_WRITECR >> 28] = 4,
403 static char *bits[] = {
404 "12", "13", "14", "resync",
405 "cnt0", "cnt1", "18", "19",
406 "20", "21", "22", "23",
407 "irq1", "irq2", "eol", "sol",
411 printk("0x%08x [ %s", risc,
412 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
413 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
414 if (risc & (1 << (i + 12)))
415 printk(" %s", bits[i]);
416 printk(" count=%d ]\n", risc & 0xfff);
417 return incr[risc >> 28] ? incr[risc >> 28] : 1;
420 void cx23885_wakeup(struct cx23885_tsport *port,
421 struct cx23885_dmaqueue *q, u32 count)
423 struct cx23885_dev *dev = port->dev;
424 struct cx23885_buffer *buf;
427 for (bc = 0;; bc++) {
428 if (list_empty(&q->active))
430 buf = list_entry(q->active.next,
431 struct cx23885_buffer, vb.queue);
433 /* count comes from the hw and is is 16bit wide --
434 * this trick handles wrap-arounds correctly for
435 * up to 32767 buffers in flight... */
436 if ((s16) (count - buf->count) < 0)
439 do_gettimeofday(&buf->vb.ts);
440 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
442 buf->vb.state = VIDEOBUF_DONE;
443 list_del(&buf->vb.queue);
444 wake_up(&buf->vb.done);
446 if (list_empty(&q->active))
447 del_timer(&q->timeout);
449 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
451 printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
455 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
456 struct sram_channel *ch,
457 unsigned int bpl, u32 risc)
459 unsigned int i, lines;
462 if (ch->cmds_start == 0) {
463 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
465 cx_write(ch->ptr1_reg, 0);
466 cx_write(ch->ptr2_reg, 0);
467 cx_write(ch->cnt2_reg, 0);
468 cx_write(ch->cnt1_reg, 0);
471 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
475 bpl = (bpl + 7) & ~7; /* alignment */
477 lines = ch->fifo_size / bpl;
482 cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
487 for (i = 0; i < lines; i++) {
488 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
489 ch->fifo_start + bpl*i);
490 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
491 cx_write(cdt + 16*i + 4, 0);
492 cx_write(cdt + 16*i + 8, 0);
493 cx_write(cdt + 16*i + 12, 0);
498 cx_write(ch->cmds_start + 0, 8);
500 cx_write(ch->cmds_start + 0, risc);
501 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
502 cx_write(ch->cmds_start + 8, cdt);
503 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
504 cx_write(ch->cmds_start + 16, ch->ctrl_start);
506 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
508 cx_write(ch->cmds_start + 20, 64 >> 2);
509 for (i = 24; i < 80; i += 4)
510 cx_write(ch->cmds_start + i, 0);
513 cx_write(ch->ptr1_reg, ch->fifo_start);
514 cx_write(ch->ptr2_reg, cdt);
515 cx_write(ch->cnt2_reg, (lines*16) >> 3);
516 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
518 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
527 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
528 struct sram_channel *ch)
530 static char *name[] = {
547 unsigned int i, j, n;
549 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
550 dev->name, ch->name);
551 for (i = 0; i < ARRAY_SIZE(name); i++)
552 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
554 cx_read(ch->cmds_start + 4*i));
556 for (i = 0; i < 4; i++) {
557 risc = cx_read(ch->cmds_start + 4 * (i + 14));
558 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
559 cx23885_risc_decode(risc);
561 for (i = 0; i < (64 >> 2); i += n) {
562 risc = cx_read(ch->ctrl_start + 4 * i);
563 /* No consideration for bits 63-32 */
565 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
566 ch->ctrl_start + 4 * i, i);
567 n = cx23885_risc_decode(risc);
568 for (j = 1; j < n; j++) {
569 risc = cx_read(ch->ctrl_start + 4 * (i + j));
570 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
571 dev->name, i+j, risc, j);
575 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
576 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
577 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
578 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
579 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
580 dev->name, cx_read(ch->ptr1_reg));
581 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
582 dev->name, cx_read(ch->ptr2_reg));
583 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
584 dev->name, cx_read(ch->cnt1_reg));
585 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
586 dev->name, cx_read(ch->cnt2_reg));
589 static void cx23885_risc_disasm(struct cx23885_tsport *port,
590 struct btcx_riscmem *risc)
592 struct cx23885_dev *dev = port->dev;
593 unsigned int i, j, n;
595 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
596 dev->name, risc->cpu, (unsigned long)risc->dma);
597 for (i = 0; i < (risc->size >> 2); i += n) {
598 printk(KERN_INFO "%s: %04d: ", dev->name, i);
599 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
600 for (j = 1; j < n; j++)
601 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
602 dev->name, i + j, risc->cpu[i + j], j);
603 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
608 static void cx23885_shutdown(struct cx23885_dev *dev)
610 /* disable RISC controller */
611 cx_write(DEV_CNTRL2, 0);
613 /* Disable all IR activity */
614 cx_write(IR_CNTRL_REG, 0);
616 /* Disable Video A/B activity */
617 cx_write(VID_A_DMA_CTL, 0);
618 cx_write(VID_B_DMA_CTL, 0);
619 cx_write(VID_C_DMA_CTL, 0);
621 /* Disable Audio activity */
622 cx_write(AUD_INT_DMA_CTL, 0);
623 cx_write(AUD_EXT_DMA_CTL, 0);
625 /* Disable Serial port */
626 cx_write(UART_CTL, 0);
628 /* Disable Interrupts */
629 cx23885_irq_disable_all(dev);
630 cx_write(VID_A_INT_MSK, 0);
631 cx_write(VID_B_INT_MSK, 0);
632 cx_write(VID_C_INT_MSK, 0);
633 cx_write(AUDIO_INT_INT_MSK, 0);
634 cx_write(AUDIO_EXT_INT_MSK, 0);
638 static void cx23885_reset(struct cx23885_dev *dev)
640 dprintk(1, "%s()\n", __func__);
642 cx23885_shutdown(dev);
644 cx_write(PCI_INT_STAT, 0xffffffff);
645 cx_write(VID_A_INT_STAT, 0xffffffff);
646 cx_write(VID_B_INT_STAT, 0xffffffff);
647 cx_write(VID_C_INT_STAT, 0xffffffff);
648 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
649 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
650 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
651 cx_write(PAD_CTRL, 0x00500300);
655 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
657 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
658 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
660 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
661 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
662 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
664 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
665 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
666 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
668 cx23885_gpio_setup(dev);
672 static int cx23885_pci_quirks(struct cx23885_dev *dev)
674 dprintk(1, "%s()\n", __func__);
676 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
677 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
678 * occur on the cx23887 bridge.
680 if (dev->bridge == CX23885_BRIDGE_885)
681 cx_clear(RDR_TLCTL0, 1 << 4);
686 static int get_resources(struct cx23885_dev *dev)
688 if (request_mem_region(pci_resource_start(dev->pci, 0),
689 pci_resource_len(dev->pci, 0),
693 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
694 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
699 static void cx23885_timeout(unsigned long data);
700 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
701 u32 reg, u32 mask, u32 value);
703 static int cx23885_init_tsport(struct cx23885_dev *dev,
704 struct cx23885_tsport *port, int portno)
706 dprintk(1, "%s(portno=%d)\n", __func__, portno);
708 /* Transport bus init dma queue - Common settings */
709 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
710 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
711 port->vld_misc_val = 0x0;
712 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
714 spin_lock_init(&port->slock);
718 INIT_LIST_HEAD(&port->mpegq.active);
719 INIT_LIST_HEAD(&port->mpegq.queued);
720 port->mpegq.timeout.function = cx23885_timeout;
721 port->mpegq.timeout.data = (unsigned long)port;
722 init_timer(&port->mpegq.timeout);
724 mutex_init(&port->frontends.lock);
725 INIT_LIST_HEAD(&port->frontends.felist);
726 port->frontends.active_fe_id = 0;
728 /* This should be hardcoded allow a single frontend
729 * attachment to this tsport, keeping the -dvb.c
730 * code clean and safe.
732 if (!port->num_frontends)
733 port->num_frontends = 1;
737 port->reg_gpcnt = VID_B_GPCNT;
738 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
739 port->reg_dma_ctl = VID_B_DMA_CTL;
740 port->reg_lngth = VID_B_LNGTH;
741 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
742 port->reg_gen_ctrl = VID_B_GEN_CTL;
743 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
744 port->reg_sop_status = VID_B_SOP_STATUS;
745 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
746 port->reg_vld_misc = VID_B_VLD_MISC;
747 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
748 port->reg_src_sel = VID_B_SRC_SEL;
749 port->reg_ts_int_msk = VID_B_INT_MSK;
750 port->reg_ts_int_stat = VID_B_INT_STAT;
751 port->sram_chno = SRAM_CH03; /* VID_B */
752 port->pci_irqmask = 0x02; /* VID_B bit1 */
755 port->reg_gpcnt = VID_C_GPCNT;
756 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
757 port->reg_dma_ctl = VID_C_DMA_CTL;
758 port->reg_lngth = VID_C_LNGTH;
759 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
760 port->reg_gen_ctrl = VID_C_GEN_CTL;
761 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
762 port->reg_sop_status = VID_C_SOP_STATUS;
763 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
764 port->reg_vld_misc = VID_C_VLD_MISC;
765 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
766 port->reg_src_sel = 0;
767 port->reg_ts_int_msk = VID_C_INT_MSK;
768 port->reg_ts_int_stat = VID_C_INT_STAT;
769 port->sram_chno = SRAM_CH06; /* VID_C */
770 port->pci_irqmask = 0x04; /* VID_C bit2 */
776 cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
777 port->reg_dma_ctl, port->dma_ctl_val, 0x00);
782 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
784 switch (cx_read(RDR_CFG2) & 0xff) {
787 dev->hwrevision = 0xa0;
791 dev->hwrevision = 0xa1;
794 /* CX23885-13Z/14Z */
795 dev->hwrevision = 0xb0;
798 if (dev->pci->device == 0x8880) {
799 /* CX23888-21Z/22Z */
800 dev->hwrevision = 0xc0;
803 dev->hwrevision = 0xa4;
807 if (dev->pci->device == 0x8880) {
809 dev->hwrevision = 0xd0;
811 /* CX23885-15Z, CX23888-31Z */
812 dev->hwrevision = 0xa5;
817 dev->hwrevision = 0xc0;
821 dev->hwrevision = 0xb1;
824 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
825 __func__, dev->hwrevision);
828 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
829 __func__, dev->hwrevision);
831 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
832 __func__, dev->hwrevision);
835 /* Find the first v4l2_subdev member of the group id in hw */
836 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
838 struct v4l2_subdev *result = NULL;
839 struct v4l2_subdev *sd;
841 spin_lock(&dev->v4l2_dev.lock);
842 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
843 if (sd->grp_id == hw) {
848 spin_unlock(&dev->v4l2_dev.lock);
852 static int cx23885_dev_setup(struct cx23885_dev *dev)
856 spin_lock_init(&dev->pci_irqmask_lock);
858 mutex_init(&dev->lock);
859 mutex_init(&dev->gpio_lock);
861 atomic_inc(&dev->refcount);
863 dev->nr = cx23885_devcount++;
864 sprintf(dev->name, "cx23885[%d]", dev->nr);
866 /* Configure the internal memory */
867 if (dev->pci->device == 0x8880) {
868 /* Could be 887 or 888, assume a default */
869 dev->bridge = CX23885_BRIDGE_887;
870 /* Apply a sensible clock frequency for the PCIe bridge */
871 dev->clk_freq = 25000000;
872 dev->sram_channels = cx23887_sram_channels;
874 if (dev->pci->device == 0x8852) {
875 dev->bridge = CX23885_BRIDGE_885;
876 /* Apply a sensible clock frequency for the PCIe bridge */
877 dev->clk_freq = 28000000;
878 dev->sram_channels = cx23885_sram_channels;
882 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
883 __func__, dev->bridge);
887 if (card[dev->nr] < cx23885_bcount)
888 dev->board = card[dev->nr];
889 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
890 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
891 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
892 dev->board = cx23885_subids[i].card;
893 if (UNSET == dev->board) {
894 dev->board = CX23885_BOARD_UNKNOWN;
895 cx23885_card_list(dev);
898 /* If the user specific a clk freq override, apply it */
899 if (cx23885_boards[dev->board].clk_freq > 0)
900 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
902 dev->pci_bus = dev->pci->bus->number;
903 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
904 cx23885_irq_add(dev, 0x001f00);
905 if (cx23885_boards[dev->board].cimax > 0)
906 cx23885_irq_add(dev, 0x01800000); /* for CiMaxes */
908 /* External Master 1 Bus */
909 dev->i2c_bus[0].nr = 0;
910 dev->i2c_bus[0].dev = dev;
911 dev->i2c_bus[0].reg_stat = I2C1_STAT;
912 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
913 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
914 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
915 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
916 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
918 /* External Master 2 Bus */
919 dev->i2c_bus[1].nr = 1;
920 dev->i2c_bus[1].dev = dev;
921 dev->i2c_bus[1].reg_stat = I2C2_STAT;
922 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
923 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
924 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
925 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
926 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
928 /* Internal Master 3 Bus */
929 dev->i2c_bus[2].nr = 2;
930 dev->i2c_bus[2].dev = dev;
931 dev->i2c_bus[2].reg_stat = I2C3_STAT;
932 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
933 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
934 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
935 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
936 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
938 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
939 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
940 cx23885_init_tsport(dev, &dev->ts1, 1);
942 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
943 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
944 cx23885_init_tsport(dev, &dev->ts2, 2);
946 if (get_resources(dev) < 0) {
947 printk(KERN_ERR "CORE %s No more PCIe resources for "
948 "subsystem: %04x:%04x\n",
949 dev->name, dev->pci->subsystem_vendor,
950 dev->pci->subsystem_device);
957 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
958 pci_resource_len(dev->pci, 0));
960 dev->bmmio = (u8 __iomem *)dev->lmmio;
962 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
963 dev->name, dev->pci->subsystem_vendor,
964 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
965 dev->board, card[dev->nr] == dev->board ?
966 "insmod option" : "autodetected");
968 cx23885_pci_quirks(dev);
970 /* Assume some sensible defaults */
971 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
972 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
973 dev->radio_type = cx23885_boards[dev->board].radio_type;
974 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
976 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x\n",
977 __func__, dev->tuner_type, dev->tuner_addr);
978 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
979 __func__, dev->radio_type, dev->radio_addr);
981 /* The cx23417 encoder has GPIO's that need to be initialised
982 * before DVB, so that demodulators and tuners are out of
983 * reset before DVB uses them.
985 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
986 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
987 cx23885_mc417_init(dev);
992 cx23885_i2c_register(&dev->i2c_bus[0]);
993 cx23885_i2c_register(&dev->i2c_bus[1]);
994 cx23885_i2c_register(&dev->i2c_bus[2]);
995 cx23885_card_setup(dev);
996 call_all(dev, core, s_power, 0);
997 cx23885_ir_init(dev);
999 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1000 if (cx23885_video_register(dev) < 0) {
1001 printk(KERN_ERR "%s() Failed to register analog "
1002 "video adapters on VID_A\n", __func__);
1006 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1007 if (cx23885_dvb_register(&dev->ts1) < 0) {
1008 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1012 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1013 if (cx23885_417_register(dev) < 0) {
1015 "%s() Failed to register 417 on VID_B\n",
1020 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1021 if (cx23885_dvb_register(&dev->ts2) < 0) {
1023 "%s() Failed to register dvb on VID_C\n",
1027 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1028 if (cx23885_417_register(dev) < 0) {
1030 "%s() Failed to register 417 on VID_C\n",
1035 cx23885_dev_checkrevision(dev);
1040 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1042 release_mem_region(pci_resource_start(dev->pci, 0),
1043 pci_resource_len(dev->pci, 0));
1045 if (!atomic_dec_and_test(&dev->refcount))
1048 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1049 cx23885_video_unregister(dev);
1051 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1052 cx23885_dvb_unregister(&dev->ts1);
1054 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1055 cx23885_417_unregister(dev);
1057 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1058 cx23885_dvb_unregister(&dev->ts2);
1060 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1061 cx23885_417_unregister(dev);
1063 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1064 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1065 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1067 iounmap(dev->lmmio);
1070 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1071 unsigned int offset, u32 sync_line,
1072 unsigned int bpl, unsigned int padding,
1075 struct scatterlist *sg;
1076 unsigned int line, todo;
1078 /* sync instruction */
1079 if (sync_line != NO_SYNC_LINE)
1080 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1084 for (line = 0; line < lines; line++) {
1085 while (offset && offset >= sg_dma_len(sg)) {
1086 offset -= sg_dma_len(sg);
1089 if (bpl <= sg_dma_len(sg)-offset) {
1090 /* fits into current chunk */
1091 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
1092 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1093 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1096 /* scanline needs to be split */
1098 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
1099 (sg_dma_len(sg)-offset));
1100 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1102 todo -= (sg_dma_len(sg)-offset);
1105 while (todo > sg_dma_len(sg)) {
1106 *(rp++) = cpu_to_le32(RISC_WRITE|
1108 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1110 todo -= sg_dma_len(sg);
1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1115 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1124 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1125 struct scatterlist *sglist, unsigned int top_offset,
1126 unsigned int bottom_offset, unsigned int bpl,
1127 unsigned int padding, unsigned int lines)
1129 u32 instructions, fields;
1134 if (UNSET != top_offset)
1136 if (UNSET != bottom_offset)
1139 /* estimate risc mem: worst case is one write per page border +
1140 one write per scan line + syncs + jump (all 2 dwords). Padding
1141 can cause next bpl to start close to a page border. First DMA
1142 region may be smaller than PAGE_SIZE */
1143 /* write and jump need and extra dword */
1144 instructions = fields * (1 + ((bpl + padding) * lines)
1145 / PAGE_SIZE + lines);
1147 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1151 /* write risc instructions */
1153 if (UNSET != top_offset)
1154 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1155 bpl, padding, lines);
1156 if (UNSET != bottom_offset)
1157 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1158 bpl, padding, lines);
1160 /* save pointer to jmp instruction address */
1162 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1166 static int cx23885_risc_databuffer(struct pci_dev *pci,
1167 struct btcx_riscmem *risc,
1168 struct scatterlist *sglist,
1176 /* estimate risc mem: worst case is one write per page border +
1177 one write per scan line + syncs + jump (all 2 dwords). Here
1178 there is no padding and no sync. First DMA region may be smaller
1180 /* Jump and write need an extra dword */
1181 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1184 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1188 /* write risc instructions */
1190 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
1192 /* save pointer to jmp instruction address */
1194 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1198 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
1199 u32 reg, u32 mask, u32 value)
1204 rc = btcx_riscmem_alloc(pci, risc, 4*16);
1208 /* write risc instructions */
1210 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
1211 *(rp++) = cpu_to_le32(reg);
1212 *(rp++) = cpu_to_le32(value);
1213 *(rp++) = cpu_to_le32(mask);
1214 *(rp++) = cpu_to_le32(RISC_JUMP);
1215 *(rp++) = cpu_to_le32(risc->dma);
1216 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1220 void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
1222 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
1224 BUG_ON(in_interrupt());
1225 videobuf_waiton(q, &buf->vb, 0, 0);
1226 videobuf_dma_unmap(q->dev, dma);
1227 videobuf_dma_free(dma);
1228 btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
1229 buf->vb.state = VIDEOBUF_NEEDS_INIT;
1232 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1234 struct cx23885_dev *dev = port->dev;
1236 dprintk(1, "%s() Register Dump\n", __func__);
1237 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1238 cx_read(DEV_CNTRL2));
1239 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1240 cx23885_irq_get_mask(dev));
1241 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1242 cx_read(AUDIO_INT_INT_MSK));
1243 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1244 cx_read(AUD_INT_DMA_CTL));
1245 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1246 cx_read(AUDIO_EXT_INT_MSK));
1247 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1248 cx_read(AUD_EXT_DMA_CTL));
1249 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1251 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1252 cx_read(ALT_PIN_OUT_SEL));
1253 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
1255 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1256 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1257 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1258 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1259 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1260 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1261 if (port->reg_src_sel)
1262 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1263 port->reg_src_sel, cx_read(port->reg_src_sel));
1264 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1265 port->reg_lngth, cx_read(port->reg_lngth));
1266 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1267 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1268 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1269 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1270 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1271 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1272 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1273 port->reg_sop_status, cx_read(port->reg_sop_status));
1274 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1275 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1276 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1277 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1278 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1279 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1280 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1281 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1284 static int cx23885_start_dma(struct cx23885_tsport *port,
1285 struct cx23885_dmaqueue *q,
1286 struct cx23885_buffer *buf)
1288 struct cx23885_dev *dev = port->dev;
1291 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1292 buf->vb.width, buf->vb.height, buf->vb.field);
1294 /* Stop the fifo and risc engine for this port */
1295 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1297 /* setup fifo + format */
1298 cx23885_sram_channel_setup(dev,
1299 &dev->sram_channels[port->sram_chno],
1300 port->ts_packet_size, buf->risc.dma);
1302 cx23885_sram_channel_dump(dev,
1303 &dev->sram_channels[port->sram_chno]);
1304 cx23885_risc_disasm(port, &buf->risc);
1307 /* write TS length to chip */
1308 cx_write(port->reg_lngth, buf->vb.width);
1310 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1311 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1312 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1314 cx23885_boards[dev->board].portb,
1315 cx23885_boards[dev->board].portc);
1319 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1320 cx23885_av_clk(dev, 0);
1324 /* If the port supports SRC SELECT, configure it */
1325 if (port->reg_src_sel)
1326 cx_write(port->reg_src_sel, port->src_sel_val);
1328 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1329 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1330 cx_write(port->reg_vld_misc, port->vld_misc_val);
1331 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1334 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1335 /* reset counter to zero */
1336 cx_write(port->reg_gpcnt_ctl, 3);
1339 /* Set VIDB pins to input */
1340 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1341 reg = cx_read(PAD_CTRL);
1342 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1343 cx_write(PAD_CTRL, reg);
1346 /* Set VIDC pins to input */
1347 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1348 reg = cx_read(PAD_CTRL);
1349 reg &= ~0x4; /* Clear TS2_SOP_OE */
1350 cx_write(PAD_CTRL, reg);
1353 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1355 reg = cx_read(PAD_CTRL);
1356 reg = reg & ~0x1; /* Clear TS1_OE */
1358 /* FIXME, bit 2 writing here is questionable */
1359 /* set TS1_SOP_OE and TS1_OE_HI */
1361 cx_write(PAD_CTRL, reg);
1363 /* FIXME and these two registers should be documented. */
1364 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1365 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1368 switch (dev->bridge) {
1369 case CX23885_BRIDGE_885:
1370 case CX23885_BRIDGE_887:
1371 case CX23885_BRIDGE_888:
1373 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1374 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1375 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1376 cx23885_irq_add(dev, port->pci_irqmask);
1377 cx23885_irq_enable_all(dev);
1383 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1385 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1386 cx23885_av_clk(dev, 1);
1389 cx23885_tsport_reg_dump(port);
1394 static int cx23885_stop_dma(struct cx23885_tsport *port)
1396 struct cx23885_dev *dev = port->dev;
1399 dprintk(1, "%s()\n", __func__);
1401 /* Stop interrupts and DMA */
1402 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1403 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1405 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1407 reg = cx_read(PAD_CTRL);
1412 /* clear TS1_SOP_OE and TS1_OE_HI */
1414 cx_write(PAD_CTRL, reg);
1415 cx_write(port->reg_src_sel, 0);
1416 cx_write(port->reg_gen_ctrl, 8);
1420 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1421 cx23885_av_clk(dev, 0);
1426 int cx23885_restart_queue(struct cx23885_tsport *port,
1427 struct cx23885_dmaqueue *q)
1429 struct cx23885_dev *dev = port->dev;
1430 struct cx23885_buffer *buf;
1432 dprintk(5, "%s()\n", __func__);
1433 if (list_empty(&q->active)) {
1434 struct cx23885_buffer *prev;
1437 dprintk(5, "%s() queue is empty\n", __func__);
1440 if (list_empty(&q->queued))
1442 buf = list_entry(q->queued.next, struct cx23885_buffer,
1445 list_del(&buf->vb.queue);
1446 list_add_tail(&buf->vb.queue, &q->active);
1447 cx23885_start_dma(port, q, buf);
1448 buf->vb.state = VIDEOBUF_ACTIVE;
1449 buf->count = q->count++;
1450 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1451 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1454 } else if (prev->vb.width == buf->vb.width &&
1455 prev->vb.height == buf->vb.height &&
1456 prev->fmt == buf->fmt) {
1457 list_del(&buf->vb.queue);
1458 list_add_tail(&buf->vb.queue, &q->active);
1459 buf->vb.state = VIDEOBUF_ACTIVE;
1460 buf->count = q->count++;
1461 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1462 /* 64 bit bits 63-32 */
1463 prev->risc.jmp[2] = cpu_to_le32(0);
1464 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1474 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1475 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1477 cx23885_start_dma(port, q, buf);
1478 list_for_each_entry(buf, &q->active, vb.queue)
1479 buf->count = q->count++;
1480 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1484 /* ------------------------------------------------------------------ */
1486 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1487 struct cx23885_buffer *buf, enum v4l2_field field)
1489 struct cx23885_dev *dev = port->dev;
1490 int size = port->ts_packet_size * port->ts_packet_count;
1493 dprintk(1, "%s: %p\n", __func__, buf);
1494 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1497 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1498 buf->vb.width = port->ts_packet_size;
1499 buf->vb.height = port->ts_packet_count;
1500 buf->vb.size = size;
1501 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1503 rc = videobuf_iolock(q, &buf->vb, NULL);
1506 cx23885_risc_databuffer(dev->pci, &buf->risc,
1507 videobuf_to_dma(&buf->vb)->sglist,
1508 buf->vb.width, buf->vb.height);
1510 buf->vb.state = VIDEOBUF_PREPARED;
1514 cx23885_free_buffer(q, buf);
1518 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1520 struct cx23885_buffer *prev;
1521 struct cx23885_dev *dev = port->dev;
1522 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1524 /* add jump to stopper */
1525 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
1526 buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
1527 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1529 if (list_empty(&cx88q->active)) {
1530 dprintk(1, "queue is empty - first active\n");
1531 list_add_tail(&buf->vb.queue, &cx88q->active);
1532 cx23885_start_dma(port, cx88q, buf);
1533 buf->vb.state = VIDEOBUF_ACTIVE;
1534 buf->count = cx88q->count++;
1535 mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
1536 dprintk(1, "[%p/%d] %s - first active\n",
1537 buf, buf->vb.i, __func__);
1539 dprintk(1, "queue is not empty - append to active\n");
1540 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1542 list_add_tail(&buf->vb.queue, &cx88q->active);
1543 buf->vb.state = VIDEOBUF_ACTIVE;
1544 buf->count = cx88q->count++;
1545 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1546 prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1547 dprintk(1, "[%p/%d] %s - append to active\n",
1548 buf, buf->vb.i, __func__);
1552 /* ----------------------------------------------------------- */
1554 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1557 struct cx23885_dev *dev = port->dev;
1558 struct cx23885_dmaqueue *q = &port->mpegq;
1559 struct cx23885_buffer *buf;
1560 unsigned long flags;
1562 spin_lock_irqsave(&port->slock, flags);
1563 while (!list_empty(&q->active)) {
1564 buf = list_entry(q->active.next, struct cx23885_buffer,
1566 list_del(&buf->vb.queue);
1567 buf->vb.state = VIDEOBUF_ERROR;
1568 wake_up(&buf->vb.done);
1569 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1570 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1573 dprintk(1, "restarting queue\n");
1574 cx23885_restart_queue(port, q);
1576 spin_unlock_irqrestore(&port->slock, flags);
1579 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1581 struct cx23885_dev *dev = port->dev;
1582 struct cx23885_dmaqueue *q = &port->mpegq;
1584 dprintk(1, "%s()\n", __func__);
1585 del_timer_sync(&q->timeout);
1586 cx23885_stop_dma(port);
1587 do_cancel_buffers(port, "cancel", 0);
1590 static void cx23885_timeout(unsigned long data)
1592 struct cx23885_tsport *port = (struct cx23885_tsport *)data;
1593 struct cx23885_dev *dev = port->dev;
1595 dprintk(1, "%s()\n", __func__);
1598 cx23885_sram_channel_dump(dev,
1599 &dev->sram_channels[port->sram_chno]);
1601 cx23885_stop_dma(port);
1602 do_cancel_buffers(port, "timeout", 1);
1605 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1607 /* FIXME: port1 assumption here. */
1608 struct cx23885_tsport *port = &dev->ts1;
1615 count = cx_read(port->reg_gpcnt);
1616 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1617 status, cx_read(port->reg_ts_int_msk), count);
1619 if ((status & VID_B_MSK_BAD_PKT) ||
1620 (status & VID_B_MSK_OPC_ERR) ||
1621 (status & VID_B_MSK_VBI_OPC_ERR) ||
1622 (status & VID_B_MSK_SYNC) ||
1623 (status & VID_B_MSK_VBI_SYNC) ||
1624 (status & VID_B_MSK_OF) ||
1625 (status & VID_B_MSK_VBI_OF)) {
1626 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1627 "= 0x%x\n", dev->name, status);
1628 if (status & VID_B_MSK_BAD_PKT)
1629 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1630 if (status & VID_B_MSK_OPC_ERR)
1631 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1632 if (status & VID_B_MSK_VBI_OPC_ERR)
1633 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1634 if (status & VID_B_MSK_SYNC)
1635 dprintk(1, " VID_B_MSK_SYNC\n");
1636 if (status & VID_B_MSK_VBI_SYNC)
1637 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1638 if (status & VID_B_MSK_OF)
1639 dprintk(1, " VID_B_MSK_OF\n");
1640 if (status & VID_B_MSK_VBI_OF)
1641 dprintk(1, " VID_B_MSK_VBI_OF\n");
1643 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1644 cx23885_sram_channel_dump(dev,
1645 &dev->sram_channels[port->sram_chno]);
1646 cx23885_417_check_encoder(dev);
1647 } else if (status & VID_B_MSK_RISCI1) {
1648 dprintk(7, " VID_B_MSK_RISCI1\n");
1649 spin_lock(&port->slock);
1650 cx23885_wakeup(port, &port->mpegq, count);
1651 spin_unlock(&port->slock);
1652 } else if (status & VID_B_MSK_RISCI2) {
1653 dprintk(7, " VID_B_MSK_RISCI2\n");
1654 spin_lock(&port->slock);
1655 cx23885_restart_queue(port, &port->mpegq);
1656 spin_unlock(&port->slock);
1659 cx_write(port->reg_ts_int_stat, status);
1666 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1668 struct cx23885_dev *dev = port->dev;
1672 if ((status & VID_BC_MSK_OPC_ERR) ||
1673 (status & VID_BC_MSK_BAD_PKT) ||
1674 (status & VID_BC_MSK_SYNC) ||
1675 (status & VID_BC_MSK_OF)) {
1677 if (status & VID_BC_MSK_OPC_ERR)
1678 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1679 VID_BC_MSK_OPC_ERR);
1681 if (status & VID_BC_MSK_BAD_PKT)
1682 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1683 VID_BC_MSK_BAD_PKT);
1685 if (status & VID_BC_MSK_SYNC)
1686 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1689 if (status & VID_BC_MSK_OF)
1690 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1693 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1695 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1696 cx23885_sram_channel_dump(dev,
1697 &dev->sram_channels[port->sram_chno]);
1699 } else if (status & VID_BC_MSK_RISCI1) {
1701 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1703 spin_lock(&port->slock);
1704 count = cx_read(port->reg_gpcnt);
1705 cx23885_wakeup(port, &port->mpegq, count);
1706 spin_unlock(&port->slock);
1708 } else if (status & VID_BC_MSK_RISCI2) {
1710 dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);
1712 spin_lock(&port->slock);
1713 cx23885_restart_queue(port, &port->mpegq);
1714 spin_unlock(&port->slock);
1718 cx_write(port->reg_ts_int_stat, status);
1725 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1727 struct cx23885_dev *dev = dev_id;
1728 struct cx23885_tsport *ts1 = &dev->ts1;
1729 struct cx23885_tsport *ts2 = &dev->ts2;
1730 u32 pci_status, pci_mask;
1731 u32 vida_status, vida_mask;
1732 u32 ts1_status, ts1_mask;
1733 u32 ts2_status, ts2_mask;
1734 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1735 bool subdev_handled;
1737 pci_status = cx_read(PCI_INT_STAT);
1738 pci_mask = cx23885_irq_get_mask(dev);
1739 vida_status = cx_read(VID_A_INT_STAT);
1740 vida_mask = cx_read(VID_A_INT_MSK);
1741 ts1_status = cx_read(VID_B_INT_STAT);
1742 ts1_mask = cx_read(VID_B_INT_MSK);
1743 ts2_status = cx_read(VID_C_INT_STAT);
1744 ts2_mask = cx_read(VID_C_INT_MSK);
1746 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1749 vida_count = cx_read(VID_A_GPCNT);
1750 ts1_count = cx_read(ts1->reg_gpcnt);
1751 ts2_count = cx_read(ts2->reg_gpcnt);
1752 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1753 pci_status, pci_mask);
1754 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1755 vida_status, vida_mask, vida_count);
1756 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1757 ts1_status, ts1_mask, ts1_count);
1758 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1759 ts2_status, ts2_mask, ts2_count);
1761 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1762 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1763 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1764 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1765 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1766 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1768 if (pci_status & PCI_MSK_RISC_RD)
1769 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1772 if (pci_status & PCI_MSK_RISC_WR)
1773 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1776 if (pci_status & PCI_MSK_AL_RD)
1777 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1780 if (pci_status & PCI_MSK_AL_WR)
1781 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1784 if (pci_status & PCI_MSK_APB_DMA)
1785 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1788 if (pci_status & PCI_MSK_VID_C)
1789 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1792 if (pci_status & PCI_MSK_VID_B)
1793 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1796 if (pci_status & PCI_MSK_VID_A)
1797 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1800 if (pci_status & PCI_MSK_AUD_INT)
1801 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1804 if (pci_status & PCI_MSK_AUD_EXT)
1805 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1808 if (pci_status & PCI_MSK_GPIO0)
1809 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1812 if (pci_status & PCI_MSK_GPIO1)
1813 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1816 if (pci_status & PCI_MSK_AV_CORE)
1817 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1820 if (pci_status & PCI_MSK_IR)
1821 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1825 if (cx23885_boards[dev->board].cimax > 0 &&
1826 ((pci_status & PCI_MSK_GPIO0) ||
1827 (pci_status & PCI_MSK_GPIO1))) {
1829 if (cx23885_boards[dev->board].cimax > 0)
1830 handled += netup_ci_slot_status(dev, pci_status);
1835 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1836 handled += cx23885_irq_ts(ts1, ts1_status);
1838 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1839 handled += cx23885_irq_417(dev, ts1_status);
1843 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1844 handled += cx23885_irq_ts(ts2, ts2_status);
1846 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1847 handled += cx23885_irq_417(dev, ts2_status);
1851 handled += cx23885_video_irq(dev, vida_status);
1853 if (pci_status & PCI_MSK_IR) {
1854 subdev_handled = false;
1855 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1856 pci_status, &subdev_handled);
1861 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1862 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1863 if (!schedule_work(&dev->cx25840_work))
1864 printk(KERN_ERR "%s: failed to set up deferred work for"
1865 " AV Core/IR interrupt. Interrupt is disabled"
1866 " and won't be re-enabled\n", dev->name);
1871 cx_write(PCI_INT_STAT, pci_status);
1873 return IRQ_RETVAL(handled);
1876 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1877 unsigned int notification, void *arg)
1879 struct cx23885_dev *dev;
1884 dev = to_cx23885(sd->v4l2_dev);
1886 switch (notification) {
1887 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1888 if (sd == dev->sd_ir)
1889 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1891 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1892 if (sd == dev->sd_ir)
1893 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1898 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1900 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1901 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1902 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1903 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1906 static inline int encoder_on_portb(struct cx23885_dev *dev)
1908 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1911 static inline int encoder_on_portc(struct cx23885_dev *dev)
1913 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1916 /* Mask represents 32 different GPIOs, GPIO's are split into multiple
1917 * registers depending on the board configuration (and whether the
1918 * 417 encoder (wi it's own GPIO's) are present. Each GPIO bit will
1919 * be pushed into the correct hardware register, regardless of the
1920 * physical location. Certain registers are shared so we sanity check
1921 * and report errors if we think we're tampering with a GPIo that might
1922 * be assigned to the encoder (and used for the host bus).
1924 * GPIO 2 thru 0 - On the cx23885 bridge
1925 * GPIO 18 thru 3 - On the cx23417 host bus interface
1926 * GPIO 23 thru 19 - On the cx25840 a/v core
1928 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1931 cx_set(GP0_IO, mask & 0x7);
1933 if (mask & 0x0007fff8) {
1934 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1936 "%s: Setting GPIO on encoder ports\n",
1938 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1942 if (mask & 0x00f80000)
1943 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1946 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1948 if (mask & 0x00000007)
1949 cx_clear(GP0_IO, mask & 0x7);
1951 if (mask & 0x0007fff8) {
1952 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1954 "%s: Clearing GPIO moving on encoder ports\n",
1956 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1960 if (mask & 0x00f80000)
1961 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1964 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1966 if (mask & 0x00000007)
1967 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1969 if (mask & 0x0007fff8) {
1970 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1972 "%s: Reading GPIO moving on encoder ports\n",
1974 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1978 if (mask & 0x00f80000)
1979 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1984 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1986 if ((mask & 0x00000007) && asoutput)
1987 cx_set(GP0_IO, (mask & 0x7) << 16);
1988 else if ((mask & 0x00000007) && !asoutput)
1989 cx_clear(GP0_IO, (mask & 0x7) << 16);
1991 if (mask & 0x0007fff8) {
1992 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1994 "%s: Enabling GPIO on encoder ports\n",
1998 /* MC417_OEN is active low for output, write 1 for an input */
1999 if ((mask & 0x0007fff8) && asoutput)
2000 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2002 else if ((mask & 0x0007fff8) && !asoutput)
2003 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2008 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2009 const struct pci_device_id *pci_id)
2011 struct cx23885_dev *dev;
2014 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2018 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2022 /* Prepare to handle notifications from subdevices */
2023 cx23885_v4l2_dev_notify_init(dev);
2027 if (pci_enable_device(pci_dev)) {
2032 if (cx23885_dev_setup(dev) < 0) {
2037 /* print pci info */
2038 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
2039 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2040 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2041 "latency: %d, mmio: 0x%llx\n", dev->name,
2042 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2044 (unsigned long long)pci_resource_start(pci_dev, 0));
2046 pci_set_master(pci_dev);
2047 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2048 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2053 if (!pci_enable_msi(pci_dev))
2054 err = request_irq(pci_dev->irq, cx23885_irq,
2055 IRQF_DISABLED, dev->name, dev);
2057 err = request_irq(pci_dev->irq, cx23885_irq,
2058 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2060 printk(KERN_ERR "%s: can't get IRQ %d\n",
2061 dev->name, pci_dev->irq);
2065 switch (dev->board) {
2066 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2067 cx23885_irq_add_enable(dev, 0x01800000); /* for NetUP */
2072 * The CX2388[58] IR controller can start firing interrupts when
2073 * enabled, so these have to take place after the cx23885_irq() handler
2074 * is hooked up by the call to request_irq() above.
2076 cx23885_ir_pci_int_enable(dev);
2077 cx23885_input_init(dev);
2082 cx23885_dev_unregister(dev);
2084 v4l2_device_unregister(&dev->v4l2_dev);
2090 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
2092 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2093 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2095 cx23885_input_fini(dev);
2096 cx23885_ir_fini(dev);
2098 cx23885_shutdown(dev);
2100 pci_disable_device(pci_dev);
2102 /* unregister stuff */
2103 free_irq(pci_dev->irq, dev);
2104 pci_disable_msi(pci_dev);
2106 cx23885_dev_unregister(dev);
2107 v4l2_device_unregister(v4l2_dev);
2111 static struct pci_device_id cx23885_pci_tbl[] = {
2116 .subvendor = PCI_ANY_ID,
2117 .subdevice = PCI_ANY_ID,
2122 .subvendor = PCI_ANY_ID,
2123 .subdevice = PCI_ANY_ID,
2125 /* --- end of list --- */
2128 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2130 static struct pci_driver cx23885_pci_driver = {
2132 .id_table = cx23885_pci_tbl,
2133 .probe = cx23885_initdev,
2134 .remove = __devexit_p(cx23885_finidev),
2140 static int __init cx23885_init(void)
2142 printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
2143 (CX23885_VERSION_CODE >> 16) & 0xff,
2144 (CX23885_VERSION_CODE >> 8) & 0xff,
2145 CX23885_VERSION_CODE & 0xff);
2147 printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
2148 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
2150 return pci_register_driver(&cx23885_pci_driver);
2153 static void __exit cx23885_fini(void)
2155 pci_unregister_driver(&cx23885_pci_driver);
2158 module_init(cx23885_init);
2159 module_exit(cx23885_fini);
2161 /* ----------------------------------------------------------- */