1 /***************************************************************************
2  * Copyright (c) 2005-2009, Broadcom Corporation.
3  *
4  *  Name: crystalhd_hw.c
5  *
6  *  Description:
7  *              BCM70010 Linux driver HW layer.
8  *
9  **********************************************************************
10  * This file is part of the crystalhd device driver.
11  *
12  * This driver is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation, version 2 of the License.
15  *
16  * This driver is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this driver.  If not, see <http://www.gnu.org/licenses/>.
23  **********************************************************************/
24
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include "crystalhd_hw.h"
29
30 /* Functions internal to this file */
31
32 static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
33 {
34         bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
35         bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
36 }
37
38
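/*
 * SDRAM bring-up. SDRAM_PARAM appears to pack the DRAM timing fields
 * (tras, trp, trrd, twr, twtr, trfc) computed from nanoseconds at a 5ns
 * clock period, e.g. the tras field below is 40ns / 5ns - 1 = 7; the
 * mode/precharge/refresh writes that follow look like a conventional
 * init sequence. Field positions are taken from the expressions below,
 * not from a datasheet.
 */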
39 static void crystalhd_start_dram(struct crystalhd_adp *adp)
40 {
41         bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) <<  0) |   /* tras: (40ns tras)/(5ns period) - 1 */
42                       /* ((15 / 5 - 1) <<  4) |                     trcd (commented out) */
43                       ((15 / 5 - 1) <<  7) |    /* trp */
44                       ((10 / 5 - 1) << 10) |    /* trrd */
45                       ((15 / 5 + 1) << 12) |    /* twr */
46                       ((2 + 1) << 16) |         /* twtr */
47                       ((70 / 5 - 2) << 19) |    /* trfc */
48                       (0 << 23));
49
50         bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
51         bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
52         bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
53         bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
54         bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
55         bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
56         bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
57         /* setting the refresh rate here */
58         bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
59 }
60
61
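/*
 * Bring the link core out of reset. Rough sequence, as coded below:
 * power the PLL back up and let it lock, release the stop/alternate
 * clock controls, program the GISB arbiter timeout for the full-speed
 * core clock, enable the 7412 decoder clock while its reset is still
 * asserted and then release that reset, disable the OTP secure modes,
 * clear a PCIe transaction-config bit and bump the 2.5V regulator trim.
 */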
62 static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
63 {
64         link_misc_perst_deco_ctrl rst_deco_cntrl;
65         link_misc_perst_clk_ctrl rst_clk_cntrl;
66         uint32_t temp;
67
68         /*
69          * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
70          * delay to allow PLL to lock Clear alternate clock, stop clock bits
71          */
72         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
73         rst_clk_cntrl.pll_pwr_dn = 0;
74         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
75         msleep_interruptible(50);
76
77         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
78         rst_clk_cntrl.stop_core_clk = 0;
79         rst_clk_cntrl.sel_alt_clk = 0;
80
81         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
82         msleep_interruptible(50);
83
84         /*
85          * Bus Arbiter Timeout: GISB_ARBITER_TIMER
86          * Set internal bus arbiter timeout to 40us based on core clock speed
87          * (63MHz * 40us = 0x9D8)
88          */
89         crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
90
91         /*
92          * Decoder clocks: MISC_PERST_DECODER_CTRL
93          * Enable clocks while 7412 reset is asserted, delay
94          * De-assert 7412 reset
95          */
96         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
97         rst_deco_cntrl.stop_bcm_7412_clk = 0;
98         rst_deco_cntrl.bcm7412_rst = 1;
99         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
100         msleep_interruptible(10);
101
102         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
103         rst_deco_cntrl.bcm7412_rst = 0;
104         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
105         msleep_interruptible(50);
106
107         /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
108         crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
109
110         /* Clear bit 29 of 0x404 */
111         temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
112         temp &= ~BC_BIT(29);
113         crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
114
115         /* 2.5V regulator must be set to 2.6 volts (+6%) */
116         /* FIXME: jarod: what's the point of this reg read? */
117         temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
118         crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
119
120         return true;
121 }
122
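/*
 * Put the link core back into reset: roughly the inverse of
 * crystalhd_bring_out_of_rst(). Stop the 7412 clock, drop the arbiter
 * timeout to match the slow alternate clock, stop the core clock and
 * switch to the alternate clock, power the PLL down, then soft-reset
 * the link core (MISC3_RESET_CTRL) while preserving the PCIe
 * transaction configuration register across the reset.
 */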
123 static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
124 {
125         link_misc_perst_deco_ctrl rst_deco_cntrl;
126         link_misc_perst_clk_ctrl  rst_clk_cntrl;
127         uint32_t                  temp;
128
129         /*
130          * Decoder clocks: MISC_PERST_DECODER_CTRL
131          * Assert 7412 reset, delay
132          * Assert 7412 stop clock
133          */
134         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
135         rst_deco_cntrl.stop_bcm_7412_clk = 1;
136         crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
137         msleep_interruptible(50);
138
139         /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
140          * Set internal bus arbiter timeout to 40us based on core clock speed
141          * (6.75MHZ * 40us = 0x10E)
142          */
143         crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
144
145         /* Link clocks: MISC_PERST_CLOCK_CTRL
146          * Stop core clk, delay
147          * Set alternate clk, delay, set PLL power down
148          */
149         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
150         rst_clk_cntrl.stop_core_clk = 1;
151         rst_clk_cntrl.sel_alt_clk = 1;
152         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
153         msleep_interruptible(50);
154
155         rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
156         rst_clk_cntrl.pll_pwr_dn = 1;
157         crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
158
159         /*
160          * Read and restore the Transaction Configuration Register
161          * after core reset
162          */
163         temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
164
165         /*
166          * Link core soft reset: MISC3_RESET_CTRL
167          * - Write BIT[0]=1 and read it back for core reset to take place
168          */
169         crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
170         rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
171         msleep_interruptible(50);
172
173         /* restore the transaction configuration register */
174         crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
175
176         return true;
177 }
178
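/*
 * Interrupt masking uses a set/clear register pair: writing 1s to
 * INTR_INTR_MSK_SET_REG masks (disables) the corresponding sources,
 * while writing the same bits to INTR_INTR_MSK_CLR_REG unmasks them.
 * That is why the enable/disable helpers below build an identical
 * bitmask and differ only in which register they write.
 */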
179 static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
180 {
181         intr_mask_reg   intr_mask;
182         intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
183         intr_mask.mask_pcie_err = 1;
184         intr_mask.mask_pcie_rbusmast_err = 1;
185         intr_mask.mask_pcie_rgr_bridge   = 1;
186         intr_mask.mask_rx_done = 1;
187         intr_mask.mask_rx_err  = 1;
188         intr_mask.mask_tx_done = 1;
189         intr_mask.mask_tx_err  = 1;
190         crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
191
192         return;
193 }
194
195 static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
196 {
197         intr_mask_reg   intr_mask;
198         intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
199         intr_mask.mask_pcie_err = 1;
200         intr_mask.mask_pcie_rbusmast_err = 1;
201         intr_mask.mask_pcie_rgr_bridge   = 1;
202         intr_mask.mask_rx_done = 1;
203         intr_mask.mask_rx_err  = 1;
204         intr_mask.mask_tx_done = 1;
205         intr_mask.mask_tx_err  = 1;
206         crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
207
208         return;
209 }
210
211 static void crystalhd_clear_errors(struct crystalhd_adp *adp)
212 {
213         uint32_t reg;
214
215         /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */
216         reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
217         if (reg)
218                 crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
219
220         reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
221         if (reg)
222                 crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
223
224         reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
225         if (reg)
226                 crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
227 }
228
229 static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
230 {
231         uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
232
233         if (intr_sts) {
234                 crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
235
236                 /* Write End Of Interrupt for PCIE */
237                 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
238         }
239 }
240
241 static void crystalhd_soft_rst(struct crystalhd_adp *adp)
242 {
243         uint32_t val;
244
245         /* Assert c011 soft reset*/
246         bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
247         msleep_interruptible(50);
248
249         /* Release c011 soft reset*/
250         bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
251
252         /* Disable Stuffing..*/
253         val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
254         val |= BC_BIT(8);
255         crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
256 }
257
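/*
 * Kick the firmware configuration step. The DRAM address of the config
 * area is split into a coarse base (BC_DRAM_FW_CFG_ADDR >> 19, written
 * to DCI_DRAM_BASE_ADDR) and a 19-bit offset (written to
 * AES_CONFIG_INFO); AES_CMD bit 0 then starts processing and AES_STATUS
 * bit 0 is polled for completion for up to roughly a second. Register
 * semantics here are inferred from this usage.
 */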
258 static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
259 {
260         uint32_t i = 0, reg;
261
262         crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
263
264         crystalhd_reg_wr(adp, AES_CMD, 0);
265         crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
266         crystalhd_reg_wr(adp, AES_CMD, 0x1);
267
268         /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
269         for (i = 0; i < 100; ++i) {
270                 reg = crystalhd_reg_rd(adp, AES_STATUS);
271                 if (reg & 0x1)
272                         return true;
273                 msleep_interruptible(10);
274         }
275
276         return false;
277 }
278
279
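/*
 * Device start sequence, as coded below: disable ASPM L1 on the link,
 * bring the core out of reset, clear pending errors/interrupts and
 * re-enable interrupt delivery, turn on the RXDMA "DWORDs transferred"
 * counter and the PCI global control bits, then soft-reset the c011
 * firmware block, initialise DRAM and route the debug UARTs.
 */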
280 static bool crystalhd_start_device(struct crystalhd_adp *adp)
281 {
282         uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
283
284         BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
285
286         reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
287         reg_pwrmgmt &= ~ASPM_L1_ENABLE;
288
289         crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
290
291         if (!crystalhd_bring_out_of_rst(adp)) {
292                 BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
293                 return false;
294         }
295
296         crystalhd_disable_interrupts(adp);
297
298         crystalhd_clear_errors(adp);
299
300         crystalhd_clear_interrupts(adp);
301
302         crystalhd_enable_interrupts(adp);
303
304         /* Enable the option for getting the total no. of DWORDS
305          * that have been transferred by the RXDMA engine
306          */
307         dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
308         dbg_options |= 0x10;
309         crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
310
311         /* Enable PCI Global Control options */
312         glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
313         glb_cntrl |= 0x100;
314         glb_cntrl |= 0x8000;
315         crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
316
317         crystalhd_enable_interrupts(adp);
318
319         crystalhd_soft_rst(adp);
320         crystalhd_start_dram(adp);
321         crystalhd_enable_uarts(adp);
322
323         return true;
324 }
325
326 static bool crystalhd_stop_device(struct crystalhd_adp *adp)
327 {
328         uint32_t reg;
329
330         BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
331         /* Clear and disable interrupts */
332         crystalhd_disable_interrupts(adp);
333         crystalhd_clear_errors(adp);
334         crystalhd_clear_interrupts(adp);
335
336         if (!crystalhd_put_in_reset(adp))
337                 BCMLOG_ERR("Failed to Put Link To Reset State\n");
338
339         reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
340         reg |= ASPM_L1_ENABLE;
341         crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
342
343         /* Set PCI Clk Req */
344         reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
345         reg |= PCI_CLK_REQ_ENABLE;
346         crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
347
348         return true;
349 }
350
351 static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
352 {
353         unsigned long flags = 0;
354         crystalhd_rx_dma_pkt *temp = NULL;
355
356         if (!hw)
357                 return NULL;
358
359         spin_lock_irqsave(&hw->lock, flags);
360         temp = hw->rx_pkt_pool_head;
361         if (temp) {
362                 hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
363                 temp->dio_req = NULL;
364                 temp->pkt_tag = 0;
365                 temp->flags = 0;
366         }
367         spin_unlock_irqrestore(&hw->lock, flags);
368
369         return temp;
370 }
371
372 static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
373                                    crystalhd_rx_dma_pkt *pkt)
374 {
375         unsigned long flags = 0;
376
377         if (!hw || !pkt)
378                 return;
379
380         spin_lock_irqsave(&hw->lock, flags);
381         pkt->next = hw->rx_pkt_pool_head;
382         hw->rx_pkt_pool_head = pkt;
383         spin_unlock_irqrestore(&hw->lock, flags);
384 }
385
386 /*
387  * Call back from TX - IOQ deletion.
388  *
389  * This routine will release the TX DMA rings allocated
390  * during the DMA-ring setup interface.
391  *
392  * Memory is allocated on a per-DMA-ring basis. This is just
393  * a placeholder to be able to create the dio queues.
394  */
395 static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
396 {
397 }
398
399 /*
400  * Rx Packet release callback.
401  *
402  * Release all user-mapped capture buffers and our DMA packets
403  * back to our free pool. The actual cleanup of the DMA
404  * ring descriptors happens during DMA ring release.
405  */
406 static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
407 {
408         struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
409         crystalhd_rx_dma_pkt *pkt = (crystalhd_rx_dma_pkt *)data;
410
411         if (!pkt || !hw) {
412                 BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
413                 return;
414         }
415
416         if (pkt->dio_req)
417                 crystalhd_unmap_dio(hw->adp, pkt->dio_req);
418         else
419                 BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
420
421         crystalhd_hw_free_rx_pkt(hw, pkt);
422 }
423
424 #define crystalhd_hw_delete_ioq(adp, q)         \
425         if (q) {                                \
426                 crystalhd_delete_dioq(adp, q);  \
427                 q = NULL;                       \
428         }
429
430 static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
431 {
432         if (!hw)
433                 return;
434
435         BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
436         crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
437         crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
438         crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
439         crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
440         crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
441 }
442
443 #define crystalhd_hw_create_ioq(sts, hw, q, cb)                 \
444 do {                                                            \
445         sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);       \
446         if (sts != BC_STS_SUCCESS)                              \
447                 goto hw_create_ioq_err;                         \
448 } while (0)
449
450 /*
451  * Create IOQs..
452  *
453  * TX - Active & Free
454  * RX - Active, Ready and Free.
455  */
456 static BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw   *hw)
457 {
458         BC_STATUS   sts = BC_STS_SUCCESS;
459
460         if (!hw) {
461                 BCMLOG_ERR("Invalid Arg!!\n");
462                 return BC_STS_INV_ARG;
463         }
464
465         crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
466                               crystalhd_tx_desc_rel_call_back);
467         crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
468                               crystalhd_tx_desc_rel_call_back);
469
470         crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
471                               crystalhd_rx_pkt_rel_call_back);
472         crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
473                               crystalhd_rx_pkt_rel_call_back);
474         crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
475                               crystalhd_rx_pkt_rel_call_back);
476
477         return sts;
478
479 hw_create_ioq_err:
480         crystalhd_hw_delete_ioqs(hw);
481
482         return sts;
483 }
484
485
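/*
 * Check whether the coded-data input buffer (CPB) has room for
 * needed_sz bytes, reading base/end/read/write pointers from whichever
 * register set matches the stream type. Fullness is ordinary ring-buffer
 * arithmetic; for example (illustrative numbers only), with base=0x1000,
 * end=0x2000, writep=0x1100 and readp=0x1800 the fullness is
 * 0x1000 - (0x1800 - 0x1100) = 0x900, leaving 0x700 bytes free, of which
 * BC_INFIFO_THRESHOLD is kept in reserve.
 */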
486 static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
487                                  bool b_188_byte_pkts,  uint8_t flags)
488 {
489         uint32_t base, end, writep, readp;
490         uint32_t cpbSize, cpbFullness, fifoSize;
491
492         if (flags & 0x02) { /* ASF Bit is set */
493                 base   = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
494                 end    = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
495                 writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
496                 readp  = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
497         } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
498                 base   = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
499                 end    = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
500                 writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
501                 readp  = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
502         } else {
503                 base   = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
504                 end    = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
505                 writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
506                 readp  = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
507         }
508
509         cpbSize = end - base;
510         if (writep >= readp)
511                 cpbFullness = writep - readp;
512         else
513                 cpbFullness = (end - base) - (readp - writep);
514
515         fifoSize = cpbSize - cpbFullness;
516
517         if (fifoSize < BC_INFIFO_THRESHOLD)
518                 return true;
519
520         if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
521                 return true;
522
523         return false;
524 }
525
526 static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
527                                             uint32_t list_id, BC_STATUS cs)
528 {
529         tx_dma_pkt *tx_req;
530
531         if (!hw || !list_id) {
532                 BCMLOG_ERR("Invalid Arg..\n");
533                 return BC_STS_INV_ARG;
534         }
535
536         hw->pwr_lock--;
537
538         tx_req = (tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
539         if (!tx_req) {
540                 if (cs != BC_STS_IO_USER_ABORT)
541                         BCMLOG_ERR("Find and Fetch Did not find req\n");
542                 return BC_STS_NO_DATA;
543         }
544
545         if (tx_req->call_back) {
546                 tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
547                 tx_req->dio_req   = NULL;
548                 tx_req->cb_event  = NULL;
549                 tx_req->call_back = NULL;
550         } else {
551                 BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
552                        tx_req->list_tag);
553         }
554
555         /* Now put back the tx_list back in FreeQ */
556         tx_req->list_tag = 0;
557
558         return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
559 }
560
561 static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
562 {
563         uint32_t err_mask, tmp;
564         unsigned long flags = 0;
565
566         err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
567                 MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
568                 MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
569
570         if (!(err_sts & err_mask))
571                 return false;
572
573         BCMLOG_ERR("Error on Tx-L0 %x \n", err_sts);
574
575         tmp = err_mask;
576
577         if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
578                 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
579
580         if (tmp) {
581                 spin_lock_irqsave(&hw->lock, flags);
582                 /* reset list index.*/
583                 hw->tx_list_post_index = 0;
584                 spin_unlock_irqrestore(&hw->lock, flags);
585         }
586
587         tmp = err_sts & err_mask;
588         crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
589
590         return true;
591 }
592
593 static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
594 {
595         uint32_t err_mask, tmp;
596         unsigned long flags = 0;
597
598         err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
599                 MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
600                 MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
601
602         if (!(err_sts & err_mask))
603                 return false;
604
605         BCMLOG_ERR("Error on Tx-L1 %x \n", err_sts);
606
607         tmp = err_mask;
608
609         if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
610                 tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
611
612         if (tmp) {
613                 spin_lock_irqsave(&hw->lock, flags);
614                 /* reset list index.*/
615                 hw->tx_list_post_index = 0;
616                 spin_unlock_irqrestore(&hw->lock, flags);
617         }
618
619         tmp = err_sts & err_mask;
620         crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
621
622         return true;
623 }
624
625 static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
626 {
627         uint32_t err_sts;
628
629         if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
630                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
631                                            BC_STS_SUCCESS);
632
633         if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
634                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
635                                            BC_STS_SUCCESS);
636
637         if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
638                          INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
639                          /* No error mask set.. */
640                          return;
641         }
642
643         /* Handle Tx errors. */
644         err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
645
646         if (crystalhd_tx_list0_handler(hw, err_sts))
647                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
648                                            BC_STS_ERROR);
649
650         if (crystalhd_tx_list1_handler(hw, err_sts))
651                 crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
652                                            BC_STS_ERROR);
653
654         hw->stats.tx_errors++;
655 }
656
657 static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc,
658                                  uint32_t ul_desc_index, uint32_t cnt)
659 {
660         uint32_t ix, ll = 0;
661
662         if (!p_dma_desc || !cnt)
663                 return;
664
665         /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
666          * setting ll (log level, I presume) to non-zero? */
667         if (!ll)
668                 return;
669
670         for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
671                 BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
672                        ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
673                        ix,
674                        p_dma_desc[ix].buff_addr_high,
675                        p_dma_desc[ix].buff_addr_low,
676                        p_dma_desc[ix].next_desc_addr_high,
677                        p_dma_desc[ix].next_desc_addr_low,
678                        p_dma_desc[ix].xfer_size,
679                        p_dma_desc[ix].intr_enable,
680                        p_dma_desc[ix].last_rec_indicator);
681         }
682
683 }
684
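/*
 * Build a hardware descriptor chain for one scatter-gather run. Each SG
 * element becomes one dma_descriptor carrying its 64-bit buffer address,
 * a transfer size in DWORDs (hence the len % 4 check) and the physical
 * address of the next descriptor. An optional trailing descriptor covers
 * the fill-byte buffer, and the final entry gets last_rec_indicator and
 * intr_enable set so the engine raises a completion interrupt.
 */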
685 static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
686                                       dma_descriptor *desc,
687                                       dma_addr_t desc_paddr_base,
688                                       uint32_t sg_cnt, uint32_t sg_st_ix,
689                                       uint32_t sg_st_off, uint32_t xfr_sz)
690 {
691         uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
692         dma_addr_t desc_phy_addr = desc_paddr_base;
693         addr_64 addr_temp;
694
695         if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
696             (!sg_cnt && !ioreq->uinfo.dir_tx)) {
697                 BCMLOG_ERR("Invalid Args\n");
698                 return BC_STS_INV_ARG;
699         }
700
701         for (ix = 0; ix < sg_cnt; ix++) {
702
703                 /* Setup SGLE index. */
704                 sg_ix = ix + sg_st_ix;
705
706                 /* Get SGLE length */
707                 len = crystalhd_get_sgle_len(ioreq, sg_ix);
708                 if (len % 4) {
709                         BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
710                         return BC_STS_NOT_IMPL;
711                 }
712                 /* Setup DMA desc with Phy addr & Length at current index. */
713                 addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
714                 if (sg_ix == sg_st_ix) {
715                         addr_temp.full_addr += sg_st_off;
716                         len -= sg_st_off;
717                 }
718                 memset(&desc[ix], 0, sizeof(desc[ix]));
719                 desc[ix].buff_addr_low  = addr_temp.low_part;
720                 desc[ix].buff_addr_high = addr_temp.high_part;
721                 desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
722
723                 /* Chain DMA descriptor.  */
724                 addr_temp.full_addr = desc_phy_addr + sizeof(dma_descriptor);
725                 desc[ix].next_desc_addr_low = addr_temp.low_part;
726                 desc[ix].next_desc_addr_high = addr_temp.high_part;
727
728                 if ((count + len) > xfr_sz)
729                         len = xfr_sz - count;
730
731                 /* Debug.. */
732                 if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
733                         BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
734                                    len, ix, count, xfr_sz, sg_cnt);
735                         return BC_STS_ERROR;
736                 }
737                 /* Length expects Multiple of 4 */
738                 desc[ix].xfer_size = (len / 4);
739
740                 crystalhd_hw_dump_desc(desc, ix, 1);
741
742                 count += len;
743                 desc_phy_addr += sizeof(dma_descriptor);
744         }
745
746         last_desc_ix = ix - 1;
747
748         if (ioreq->fb_size) {
749                 memset(&desc[ix], 0, sizeof(desc[ix]));
750                 addr_temp.full_addr     = ioreq->fb_pa;
751                 desc[ix].buff_addr_low  = addr_temp.low_part;
752                 desc[ix].buff_addr_high = addr_temp.high_part;
753                 desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
754                 desc[ix].xfer_size      = 1;
755                 desc[ix].fill_bytes     = 4 - ioreq->fb_size;
756                 count += ioreq->fb_size;
757                 last_desc_ix++;
758         }
759
760         /* setup last descriptor..*/
761         desc[last_desc_ix].last_rec_indicator  = 1;
762         desc[last_desc_ix].next_desc_addr_low  = 0;
763         desc[last_desc_ix].next_desc_addr_high = 0;
764         desc[last_desc_ix].intr_enable = 1;
765
766         crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
767
768         if (count != xfr_sz) {
769                 BCMLOG_ERR("internal error: sz curr:%x exp:%x\n", count, xfr_sz);
770                 return BC_STS_ERROR;
771         }
772
773         return BC_STS_SUCCESS;
774 }
775
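/*
 * Translate an SG list into descriptor chains. For TX (or RX without a
 * UV plane) a single chain covers the whole transfer. For RX with a UV
 * plane the list is split at uinfo.uv_sg_ix/uv_sg_off: the first chain
 * covers the Y data up to uv_offset, a second chain is built for the
 * remaining UV bytes, and *uv_desc_index records where the UV portion
 * begins.
 */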
776 static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq,
777                                               pdma_desc_mem pdesc_mem,
778                                               uint32_t *uv_desc_index)
779 {
780         dma_descriptor *desc = NULL;
781         dma_addr_t desc_paddr_base = 0;
782         uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
783         uint32_t xfr_sz = 0;
784         BC_STATUS sts = BC_STS_SUCCESS;
785
786         /* Check params.. */
787         if (!ioreq || !pdesc_mem || !uv_desc_index) {
788                 BCMLOG_ERR("Invalid Args\n");
789                 return BC_STS_INV_ARG;
790         }
791
792         if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
793             !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
794                 BCMLOG_ERR("Invalid Args\n");
795                 return BC_STS_INV_ARG;
796         }
797
798         if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
799                 BCMLOG_ERR("UV offset for TX??\n");
800                 return BC_STS_INV_ARG;
801
802         }
803
804         desc = pdesc_mem->pdma_desc_start;
805         desc_paddr_base = pdesc_mem->phy_addr;
806
807         if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
808                 sg_cnt = ioreq->sg_cnt;
809                 xfr_sz = ioreq->uinfo.xfr_len;
810         } else {
811                 sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
812                 xfr_sz = ioreq->uinfo.uv_offset;
813         }
814
815         sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
816                                    sg_st_ix, sg_st_off, xfr_sz);
817
818         if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
819                 return sts;
820
821         /* Prepare for UV mapping.. */
822         desc = &pdesc_mem->pdma_desc_start[sg_cnt];
823         desc_paddr_base = pdesc_mem->phy_addr +
824                           (sg_cnt * sizeof(dma_descriptor));
825
826         /* Done with desc addr.. now update sg stuff.*/
827         sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
828         xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
829         sg_st_ix  = ioreq->uinfo.uv_sg_ix;
830         sg_st_off = ioreq->uinfo.uv_sg_off;
831
832         sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
833                                    sg_st_ix, sg_st_off, xfr_sz);
834         if (sts != BC_STS_SUCCESS)
835                 return sts;
836
837         *uv_desc_index = sg_st_ix;
838
839         return sts;
840 }
841
842 static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
843 {
844         uint32_t dma_cntrl;
845
846         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
847         if (!(dma_cntrl & DMA_START_BIT)) {
848                 dma_cntrl |= DMA_START_BIT;
849                 crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
850                                dma_cntrl);
851         }
852
853         return;
854 }
855
856 /* _CHECK_THIS_
857  *
858  * Verify whether the Stop generates a completion interrupt or not.
859  * If it does not generate an interrupt, then add polling here.
860  */
861 static BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
862 {
863         uint32_t dma_cntrl, cnt = 30;
864         uint32_t l1 = 1, l2 = 1;
865         unsigned long flags = 0;
866
867         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
868
869         BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
870
871         /* The engine is already stopped if the DMA start bit is clear. */
872         if (!(dma_cntrl & DMA_START_BIT)) {
873                 BCMLOG(BCMLOG_DBG, "Already Stopped\n");
874                 return BC_STS_SUCCESS;
875         }
876
877         crystalhd_disable_interrupts(hw->adp);
878
879         /* Issue stop to HW */
880         /* This bit when set gave problems. Please check*/
881         dma_cntrl &= ~DMA_START_BIT;
882         crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
883
884         BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
885
886         /* Poll for 3seconds (30 * 100ms) on both the lists..*/
887         while ((l1 || l2) && cnt) {
888
889                 if (l1) {
890                         l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
891                         l1 &= DMA_START_BIT;
892                 }
893
894                 if (l2) {
895                         l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
896                         l2 &= DMA_START_BIT;
897                 }
898
899                 msleep_interruptible(100);
900
901                 cnt--;
902         }
903
904         if (!cnt) {
905                 BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
906                 crystalhd_enable_interrupts(hw->adp);
907                 return BC_STS_ERROR;
908         }
909
910         spin_lock_irqsave(&hw->lock, flags);
911         hw->tx_list_post_index = 0;
912         spin_unlock_irqrestore(&hw->lock, flags);
913         BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
914         crystalhd_enable_interrupts(hw->adp);
915
916         return BC_STS_SUCCESS;
917 }
918
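/*
 * The PIB delivery and release queues live in device DRAM as small
 * circular lists sharing one layout: word 0 holds the read offset,
 * word 1 the write offset, and the slots between MIN_PIB_Q_DEPTH and
 * MAX_PIB_Q_DEPTH hold DRAM addresses of picture information blocks.
 * The helpers below (count, fetch, release) all operate on that layout
 * through crystalhd_mem_rd()/crystalhd_mem_wr().
 */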
919 static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
920 {
921         /*
922          * Position of the PIB Entries can be found at
923          * 0th and the 1st location of the Circular list.
924          */
925         uint32_t Q_addr;
926         uint32_t pib_cnt, r_offset, w_offset;
927
928         Q_addr = hw->pib_del_Q_addr;
929
930         /* Get the Read Pointer */
931         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
932
933         /* Get the Write Pointer */
934         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
935
936         if (r_offset == w_offset)
937                 return 0;       /* Queue is empty */
938
939         if (w_offset > r_offset)
940                 pib_cnt = w_offset - r_offset;
941         else
942                 pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
943                           (r_offset + MIN_PIB_Q_DEPTH);
944
945         if (pib_cnt > MAX_PIB_Q_DEPTH) {
946                 BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
947                 return 0;
948         }
949
950         return pib_cnt;
951 }
952
953 static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
954 {
955         uint32_t Q_addr;
956         uint32_t addr_entry, r_offset, w_offset;
957
958         Q_addr = hw->pib_del_Q_addr;
959
960         /* Get the Read Pointer 0Th Location is Read Pointer */
961         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
962
963         /* Get the Write Pointer 1st Location is Write pointer */
964         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
965
966         /* Queue is empty */
967         if (r_offset == w_offset)
968                 return 0;
969
970         if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
971                 return 0;
972
973         /* Get the Actual Address of the PIB */
974         crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
975                        1, &addr_entry);
976
977         /* Increment the Read Pointer */
978         r_offset++;
979
980         if (MAX_PIB_Q_DEPTH == r_offset)
981                 r_offset = MIN_PIB_Q_DEPTH;
982
983         /* Write back the read pointer to It's Location */
984         crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
985
986         return addr_entry;
987 }
988
989 static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
990 {
991         uint32_t Q_addr;
992         uint32_t r_offset, w_offset, n_offset;
993
994         Q_addr = hw->pib_rel_Q_addr;
995
996         /* Get the Read Pointer */
997         crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
998
999         /* Get the Write Pointer */
1000         crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
1001
1002         if ((r_offset < MIN_PIB_Q_DEPTH) ||
1003             (r_offset >= MAX_PIB_Q_DEPTH))
1004                 return false;
1005
1006         n_offset = w_offset + 1;
1007
1008         if (MAX_PIB_Q_DEPTH == n_offset)
1009                 n_offset = MIN_PIB_Q_DEPTH;
1010
1011         if (r_offset == n_offset)
1012                 return false; /* should never happen */
1013
1014         /* Write the DRAM ADDR to the Queue at Next Offset */
1015         crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
1016                        1, &addr_to_rel);
1017
1018         /* Put the New value of the write pointer in Queue */
1019         crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
1020
1021         return true;
1022 }
1023
1024 static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib)
1025 {
1026         if (!src_pib || !dst_pib) {
1027                 BCMLOG_ERR("Invalid Arguments\n");
1028                 return;
1029         }
1030
1031         dst_pib->timeStamp           = 0;
1032         dst_pib->picture_number      = src_pib->ppb.picture_number;
1033         dst_pib->width               = src_pib->ppb.width;
1034         dst_pib->height              = src_pib->ppb.height;
1035         dst_pib->chroma_format       = src_pib->ppb.chroma_format;
1036         dst_pib->pulldown            = src_pib->ppb.pulldown;
1037         dst_pib->flags               = src_pib->ppb.flags;
1038         dst_pib->sess_num            = src_pib->ptsStcOffset;
1039         dst_pib->aspect_ratio        = src_pib->ppb.aspect_ratio;
1040         dst_pib->colour_primaries     = src_pib->ppb.colour_primaries;
1041         dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1042         dst_pib->frame_rate           = src_pib->resolution;
1043         return;
1044 }
1045
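/*
 * Drain the PIB delivery queue: for each available entry, read the C011
 * PIB out of DRAM and, if it signals a format change, take a packet off
 * the rx free queue, copy the PIB into it, flag it PIB_VALID|FMT_CHANGE
 * and post it on the ready queue; the DRAM address is then handed back
 * on the release queue.
 */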
1046 static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
1047 {
1048         unsigned int cnt;
1049         C011_PIB src_pib;
1050         uint32_t pib_addr, pib_cnt;
1051         BC_PIC_INFO_BLOCK *AppPib;
1052         crystalhd_rx_dma_pkt *rx_pkt = NULL;
1053
1054         pib_cnt = crystalhd_get_pib_avail_cnt(hw);
1055
1056         if (!pib_cnt)
1057                 return;
1058
1059         for (cnt = 0; cnt < pib_cnt; cnt++) {
1060
1061                 pib_addr = crystalhd_get_addr_from_pib_Q(hw);
1062                 crystalhd_mem_rd(hw->adp, pib_addr, sizeof(C011_PIB) / 4,
1063                                (uint32_t *)&src_pib);
1064
1065                 if (src_pib.bFormatChange) {
1066                         rx_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
1067                         if (!rx_pkt)
1068                                 return;
1069                         rx_pkt->flags = 0;
1070                         rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
1071                         AppPib = &rx_pkt->pib;
1072                         cpy_pib_to_app(&src_pib, AppPib);
1073
1074                         BCMLOG(BCMLOG_DBG,
1075                                "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1076                                rx_pkt->pib.picture_number,
1077                                rx_pkt->pib.aspect_ratio,
1078                                rx_pkt->pib.chroma_format,
1079                                rx_pkt->pib.colour_primaries,
1080                                rx_pkt->pib.frame_rate,
1081                                rx_pkt->pib.height,
1082                                rx_pkt->pib.height,
1083                                rx_pkt->pib.n_drop,
1084                                rx_pkt->pib.pulldown,
1085                                rx_pkt->pib.ycom);
1086
1087                         crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
1088
1089                 }
1090
1091                 crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
1092         }
1093 }
1094
1095 static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1096 {
1097         uint32_t        dma_cntrl;
1098
1099         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1100         if (!(dma_cntrl & DMA_START_BIT)) {
1101                 dma_cntrl |= DMA_START_BIT;
1102                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1103         }
1104
1105         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1106         if (!(dma_cntrl & DMA_START_BIT)) {
1107                 dma_cntrl |= DMA_START_BIT;
1108                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1109         }
1110
1111         return;
1112 }
1113
1114 static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
1115 {
1116         uint32_t dma_cntrl = 0, count = 30;
1117         uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
1118
1119         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1120         if ((dma_cntrl & DMA_START_BIT)) {
1121                 dma_cntrl &= ~DMA_START_BIT;
1122                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1123         }
1124
1125         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1126         if ((dma_cntrl & DMA_START_BIT)) {
1127                 dma_cntrl &= ~DMA_START_BIT;
1128                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1129         }
1130
1131         /* Poll for 3seconds (30 * 100ms) on both the lists..*/
1132         while ((l0y || l0uv || l1y || l1uv) && count) {
1133
1134                 if (l0y) {
1135                         l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
1136                         l0y &= DMA_START_BIT;
1137                         if (!l0y) {
1138                                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1139                         }
1140                 }
1141
1142                 if (l1y) {
1143                         l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
1144                         l1y &= DMA_START_BIT;
1145                         if (!l1y) {
1146                                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1147                         }
1148                 }
1149
1150                 if (l0uv) {
1151                         l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
1152                         l0uv &= DMA_START_BIT;
1153                         if (!l0uv) {
1154                                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1155                         }
1156                 }
1157
1158                 if (l1uv) {
1159                         l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
1160                         l1uv &= DMA_START_BIT;
1161                         if (!l1uv) {
1162                                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1163                         }
1164                 }
1165                 msleep_interruptible(100);
1166                 count--;
1167         }
1168
1169         hw->rx_list_post_index = 0;
1170
1171         BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1172                count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
1173 }
1174
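/*
 * Arm one of the two RX DMA lists with a capture packet. The list at
 * rx_list_post_index must be free; the packet is tagged, the list is
 * marked as waiting for Y (and optionally UV) completion, and the
 * descriptor base addresses are written to that list's first-descriptor
 * registers. Bit 0 of the low address appears to act as the list
 * valid/start flag, hence the "| 0x01".
 */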
1175 static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_dma_pkt *rx_pkt)
1176 {
1177         uint32_t y_low_addr_reg, y_high_addr_reg;
1178         uint32_t uv_low_addr_reg, uv_high_addr_reg;
1179         addr_64 desc_addr;
1180         unsigned long flags;
1181
1182         if (!hw || !rx_pkt) {
1183                 BCMLOG_ERR("Invalid Arguments\n");
1184                 return BC_STS_INV_ARG;
1185         }
1186
1187         if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
1188                 BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
1189                 return BC_STS_INV_ARG;
1190         }
1191
1192         spin_lock_irqsave(&hw->rx_lock, flags);
1193         /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
1194         if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
1195                 spin_unlock_irqrestore(&hw->rx_lock, flags);
1196                 return BC_STS_BUSY;
1197         }
1198
1199         if (!hw->rx_list_post_index) {
1200                 y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
1201                 y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
1202                 uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
1203                 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
1204         } else {
1205                 y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
1206                 y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
1207                 uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
1208                 uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
1209         }
1210         rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
1211         hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
1212         if (rx_pkt->uv_phy_addr)
1213                 hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
1214         hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
1215         spin_unlock_irqrestore(&hw->rx_lock, flags);
1216
1217         crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);
1218
1219         crystalhd_start_rx_dma_engine(hw);
1220         /* Program the Y descriptor */
1221         desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
1222         crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
1223         crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
1224
1225         if (rx_pkt->uv_phy_addr) {
1226                 /* Program the UV descriptor */
1227                 desc_addr.full_addr = rx_pkt->uv_phy_addr;
1228                 crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
1229                 crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
1230         }
1231
1232         return BC_STS_SUCCESS;
1233 }
1234
1235 static BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1236                                           crystalhd_rx_dma_pkt *rx_pkt)
1237 {
1238         BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1239
1240         if (sts == BC_STS_BUSY)
1241                 crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1242                                  false, rx_pkt->pkt_tag);
1243
1244         return sts;
1245 }
1246
1247 static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1248                              uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1249 {
1250         uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1251
1252         if (!list_index) {
1253                 y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1254                 uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1255         } else {
1256                 y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1257                 uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1258         }
1259
1260         *y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1261         *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
1262 }
1263
1264 /*
1265  * This function should be called only after making sure that the two DMA
1266  * lists are free. This function does not check if DMA's are active, before
1267  * turning off the DMA.
1268  */
1269 static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
1270 {
1271         uint32_t dma_cntrl, aspm;
1272
1273         hw->stop_pending = 0;
1274
1275         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1276         if (dma_cntrl & DMA_START_BIT) {
1277                 dma_cntrl &= ~DMA_START_BIT;
1278                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1279         }
1280
1281         dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1282         if (dma_cntrl & DMA_START_BIT) {
1283                 dma_cntrl &= ~DMA_START_BIT;
1284                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1285         }
1286         hw->rx_list_post_index = 0;
1287
1288         aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
1289         aspm |= ASPM_L1_ENABLE;
1290         /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1291         crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
1292 }
1293
1294 static BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
1295                                      BC_STATUS comp_sts)
1296 {
1297         crystalhd_rx_dma_pkt *rx_pkt = NULL;
1298         uint32_t y_dw_dnsz, uv_dw_dnsz;
1299         BC_STATUS sts = BC_STS_SUCCESS;
1300
1301         if (!hw || list_index >= DMA_ENGINE_CNT) {
1302                 BCMLOG_ERR("Invalid Arguments\n");
1303                 return BC_STS_INV_ARG;
1304         }
1305
1306         rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
1307                                              hw->rx_pkt_tag_seed + list_index);
1308         if (!rx_pkt) {
1309                 BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1310                            hw->rx_list_post_index, hw->rx_list_sts[0],
1311                            hw->rx_list_sts[1], list_index,
1312                            hw->rx_pkt_tag_seed + list_index, comp_sts);
1313                 return BC_STS_INV_ARG;
1314         }
1315
1316         if (comp_sts == BC_STS_SUCCESS) {
1317                 crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
1318                 rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
1319                 rx_pkt->flags = COMP_FLAG_DATA_VALID;
1320                 if (rx_pkt->uv_phy_addr)
1321                         rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
1322                 crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
1323                                 hw->rx_pkt_tag_seed + list_index);
1324                 return sts;
1325         }
1326
1327         /* Check if we can post this DIO again. */
1328         return crystalhd_hw_post_cap_buff(hw, rx_pkt);
1329 }
1330
1331 static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1332                                      uint32_t y_err_sts, uint32_t uv_err_sts)
1333 {
1334         uint32_t tmp;
1335         list_sts tmp_lsts;
1336
1337         if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
1338                 return false;
1339
1340         tmp_lsts = hw->rx_list_sts[0];
1341
1342         /* Y0 - DMA */
1343         tmp = y_err_sts & GET_Y0_ERR_MSK;
1344         if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
1345                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1346
1347         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1348                 hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1349                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1350         }
1351
1352         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1353                 hw->rx_list_sts[0] &= ~rx_y_mask;
1354                 hw->rx_list_sts[0] |= rx_y_error;
1355                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1356         }
1357
1358         if (tmp) {
1359                 hw->rx_list_sts[0] &= ~rx_y_mask;
1360                 hw->rx_list_sts[0] |= rx_y_error;
1361                 hw->rx_list_post_index = 0;
1362         }
1363
1364         /* UV0 - DMA */
1365         tmp = uv_err_sts & GET_UV0_ERR_MSK;
1366         if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
1367                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1368
1369         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1370                 hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1371                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1372         }
1373
1374         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1375                 hw->rx_list_sts[0] &= ~rx_uv_mask;
1376                 hw->rx_list_sts[0] |= rx_uv_error;
1377                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1378         }
1379
1380         if (tmp) {
1381                 hw->rx_list_sts[0] &= ~rx_uv_mask;
1382                 hw->rx_list_sts[0] |= rx_uv_error;
1383                 hw->rx_list_post_index = 0;
1384         }
1385
1386         if (y_err_sts & GET_Y0_ERR_MSK) {
1387                 tmp = y_err_sts & GET_Y0_ERR_MSK;
1388                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1389         }
1390
1391         if (uv_err_sts & GET_UV0_ERR_MSK) {
1392                 tmp = uv_err_sts & GET_UV0_ERR_MSK;
1393                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1394         }
1395
1396         return (tmp_lsts != hw->rx_list_sts[0]);
1397 }
1398
1399 static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1400                                      uint32_t y_err_sts, uint32_t uv_err_sts)
1401 {
1402         uint32_t tmp;
1403         list_sts tmp_lsts;
1404
1405         if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
1406                 return false;
1407
1408         tmp_lsts = hw->rx_list_sts[1];
1409
1410         /* Y1 - DMA */
1411         tmp = y_err_sts & GET_Y1_ERR_MSK;
1412         if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
1413                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1414
1415         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1416                 hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1417                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1418         }
1419
1420         if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1421                 /* Add retry-support..*/
1422                 hw->rx_list_sts[1] &= ~rx_y_mask;
1423                 hw->rx_list_sts[1] |= rx_y_error;
1424                 tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1425         }
1426
1427         if (tmp) {
1428                 hw->rx_list_sts[1] &= ~rx_y_mask;
1429                 hw->rx_list_sts[1] |= rx_y_error;
1430                 hw->rx_list_post_index = 0;
1431         }
1432
1433         /* UV1 - DMA */
1434         tmp = uv_err_sts & GET_UV1_ERR_MSK;
1435         if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK) {
1436                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1437         }
1438
1439         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1440                 hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1441                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1442         }
1443
1444         if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1445                 /* Add retry-support*/
1446                 hw->rx_list_sts[1] &= ~rx_uv_mask;
1447                 hw->rx_list_sts[1] |= rx_uv_error;
1448                 tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1449         }
1450
1451         if (tmp) {
1452                 hw->rx_list_sts[1] &= ~rx_uv_mask;
1453                 hw->rx_list_sts[1] |= rx_uv_error;
1454                 hw->rx_list_post_index = 0;
1455         }
1456
1457         if (y_err_sts & GET_Y1_ERR_MSK) {
1458                 tmp = y_err_sts & GET_Y1_ERR_MSK;
1459                 crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1460         }
1461
1462         if (uv_err_sts & GET_UV1_ERR_MSK) {
1463                 tmp = uv_err_sts & GET_UV1_ERR_MSK;
1464                 crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1465         }
1466
1467         return (tmp_lsts != hw->rx_list_sts[1]);
1468 }
1469
1470
1471 static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
1472 {
1473         unsigned long flags;
1474         uint32_t i, list_avail = 0;
1475         BC_STATUS comp_sts = BC_STS_NO_DATA;
1476         uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
1477         bool ret = false;
1478
1479         if (!hw) {
1480                 BCMLOG_ERR("Invalid Arguments\n");
1481                 return;
1482         }
1483
1484         if (!(intr_sts & GET_RX_INTR_MASK))
1485                 return;
1486
1487         y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
1488         uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
1489
1490         for (i = 0; i < DMA_ENGINE_CNT; i++) {
1491                 /* Update States..*/
1492                 spin_lock_irqsave(&hw->rx_lock, flags);
1493                 if (i == 0)
1494                         ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1495                 else
1496                         ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1497                 if (ret) {
1498                         switch (hw->rx_list_sts[i]) {
1499                         case sts_free:
1500                                 comp_sts = BC_STS_SUCCESS;
1501                                 list_avail = 1;
1502                                 break;
1503                         case rx_y_error:
1504                         case rx_uv_error:
1505                         case rx_sts_error:
1506                                 /* We got an error on Y, UV, or both. */
1507                                 hw->stats.rx_errors++;
1508                                 crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
1509                                 /* FIXME: jarod: this is where my mini pci-e card is tripping up */
1510                                 BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
1511                                        "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1512                                        i, hw->stats.rx_errors, y_err_sts,
1513                                        uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
1514                                 hw->rx_list_sts[i] = sts_free;
1515                                 comp_sts = BC_STS_ERROR;
1516                                 break;
1517                         default:
1518                                 /* Wait for completion..*/
1519                                 comp_sts = BC_STS_NO_DATA;
1520                                 break;
1521                         }
1522                 }
1523                 spin_unlock_irqrestore(&hw->rx_lock, flags);
1524
1525                 /* handle completion...*/
1526                 if (comp_sts != BC_STS_NO_DATA) {
1527                         crystalhd_rx_pkt_done(hw, i, comp_sts);
1528                         comp_sts = BC_STS_NO_DATA;
1529                 }
1530         }
1531
1532         if (list_avail) {
1533                 if (hw->stop_pending) {
1534                         if ((hw->rx_list_sts[0] == sts_free) &&
1535                             (hw->rx_list_sts[1] == sts_free))
1536                                 crystalhd_hw_finalize_pause(hw);
1537                 } else {
1538                         crystalhd_hw_start_capture(hw);
1539                 }
1540         }
1541 }
1542
1543 static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1544                                           BC_FW_CMD *fw_cmd)
1545 {
1546         BC_STATUS sts = BC_STS_SUCCESS;
1547         DecRspChannelStartVideo *st_rsp = NULL;
1548
1549         switch (fw_cmd->cmd[0]) {
1550         case eCMD_C011_DEC_CHAN_START_VIDEO:
1551                 st_rsp = (DecRspChannelStartVideo *)fw_cmd->rsp;
1552                 hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1553                 hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1554                 BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1555                        hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1556                 break;
1557         case eCMD_C011_INIT:
1558                 if (!(crystalhd_load_firmware_config(hw->adp))) {
1559                         BCMLOG_ERR("Failed to load firmware config.\n");
1560                         sts = BC_STS_FW_AUTH_FAILED;
1561                 }
1562                 break;
1563         default:
1564                 break;
1565         }
1566         return sts;
1567 }
1568
1569 static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
1570 {
1571         uint32_t reg;
1572         link_misc_perst_decoder_ctrl rst_cntrl_reg;
1573
1574         /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1575         rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
1576
1577         rst_cntrl_reg.bcm_7412_rst = 1;
1578         crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1579         msleep_interruptible(50);
1580
1581         rst_cntrl_reg.bcm_7412_rst = 0;
1582         crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1583
1584         /* Close all banks, put DDR in idle */
1585         bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
1586
1587         /* Set bit 25 (drop CKE pin of DDR) */
1588         reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
1589         reg |= 0x02000000;
1590         bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
1591
1592         /* Reset the audio block */
1593         bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
1594
1595         /* Power down Raptor PLL */
1596         reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
1597         reg |= 0x00008000;
1598         bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
1599
1600         /* Power down all Audio PLL */
1601         bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
1602
1603         /* Power down video clock (75MHz) */
1604         reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
1605         reg |= 0x00008000;
1606         bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
1607
1608         /* Power down video clock (75MHz) */
1609         reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
1610         reg |= 0x00008000;
1611         bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
1612
1613         /* Power down core clock (200MHz) */
1614         reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
1615         reg |= 0x00008000;
1616         bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
1617
1618         /* Power down core clock (200MHz) */
1619         reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
1620         reg |= 0x00008000;
1621         bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
1622
1623         return BC_STS_SUCCESS;
1624 }
1625
1626 /************************************************
1627 **
1628 *************************************************/
1629
1630 BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
1631 {
1632         uint32_t reg_data, cnt, *temp_buff;
1633         uint32_t fw_sig_len = 36;
1634         uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1635
1636         BCMLOG_ENTER;
1637
1638         if (!adp || !buffer || !sz) {
1639                 BCMLOG_ERR("Invalid Params.\n");
1640                 return BC_STS_INV_ARG;
1641         }
1642
1643         reg_data = crystalhd_reg_rd(adp, OTP_CMD);
1644         if (!(reg_data & 0x02)) {
1645                 BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1646                 return BC_STS_ERROR;
1647         }
1648
1649         reg_data = 0;
1650         crystalhd_reg_wr(adp, DCI_CMD, 0);
1651         reg_data |= BC_BIT(0);
1652         crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1653
1654         reg_data = 0;
1655         cnt = 1000;
1656         msleep_interruptible(10);
1657
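             /* Poll DCI_STATUS for the download-ready bit (BC_BIT(4)); give up after 1000 reads. */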
1658         while (reg_data != BC_BIT(4)) {
1659                 reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1660                 reg_data &= BC_BIT(4);
1661                 if (--cnt == 0) {
1662                         BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1663                         return BC_STS_TIMEOUT;
1664                 }
1665         }
1666
1667         msleep_interruptible(10);
1668         /*  Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
1669         crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
1670         temp_buff = (uint32_t *)buffer;
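             /* Copy the image one dword at a time; DCI_DRAM_BASE_ADDR is refreshed with (dram_offset >> 19) on each pass, presumably selecting the 512KB DRAM page being written. */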
1671         for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
1672                 crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
1673                 crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
1674                 dram_offset += 4;
1675                 temp_buff++;
1676         }
1677         msleep_interruptible(10);
1678
1679         temp_buff++;
1680
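             /* Write the signature trailer byte-swapped into DCI_SIGNATURE_DATA_7 down through the lower signature registers (the first dword of the trailer is skipped above). */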
1681         sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
1682         for (cnt = 0; cnt < 8; cnt++) {
1683                 uint32_t swapped_data = *temp_buff;
1684                 swapped_data = bswap_32_1(swapped_data);
1685                 crystalhd_reg_wr(adp, sig_reg, swapped_data);
1686                 sig_reg -= 4;
1687                 temp_buff++;
1688         }
1689         msleep_interruptible(10);
1690
1691         reg_data = 0;
1692         reg_data |= BC_BIT(1);
1693         crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1694         msleep_interruptible(10);
1695
1696         reg_data = 0;
1697         reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1698
1699         if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
1700                 cnt = 1000;
1701                 while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
1702                         reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1703                         reg_data &= BC_BIT(0);
1704                         if (!(--cnt))
1705                                 break;
1706                         msleep_interruptible(10);
1707                 }
1708                 reg_data = 0;
1709                 reg_data = crystalhd_reg_rd(adp, DCI_CMD);
1710                 reg_data |= BC_BIT(4);
1711                 crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1712
1713         } else {
1714                 BCMLOG_ERR("F/w Signature mismatch\n");
1715                 return BC_STS_FW_AUTH_FAILED;
1716         }
1717
1718         BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
1719         return BC_STS_SUCCESS;
1720 }
1721
1722 BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd)
1723 {
1724         uint32_t cnt = 0, cmd_res_addr;
1725         uint32_t *cmd_buff, *res_buff;
1726         wait_queue_head_t fw_cmd_event;
1727         int rc = 0;
1728         BC_STATUS sts;
1729
1730         crystalhd_create_event(&fw_cmd_event);
1731
1732         BCMLOG_ENTER;
1733
1734         if (!hw || !fw_cmd) {
1735                 BCMLOG_ERR("Invalid Arguments\n");
1736                 return BC_STS_INV_ARG;
1737         }
1738
1739         cmd_buff = fw_cmd->cmd;
1740         res_buff = fw_cmd->rsp;
1741
1742         if (!cmd_buff || !res_buff) {
1743                 BCMLOG_ERR("Invalid Parameters for F/W Command\n");
1744                 return BC_STS_INV_ARG;
1745         }
1746
1747         hw->pwr_lock++;
1748
1749         hw->fwcmd_evt_sts = 0;
1750         hw->pfw_cmd_event = &fw_cmd_event;
1751
1752         /*Write the command to the memory*/
1753         crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
1754
1755         /*Memory Read for memory arbitrator flush*/
1756         crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
1757
1758         /* Write the command address to mailbox */
1759         bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
1760         msleep_interruptible(50);
1761
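             /* Wait (up to 20000, presumably ms) for the ISR to set fwcmd_evt_sts when the firmware responds. */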
1762         crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
1763
1764         if (!rc) {
1765                 sts = BC_STS_SUCCESS;
1766         } else if (rc == -EBUSY) {
1767                 BCMLOG_ERR("Firmware command T/O\n");
1768                 sts = BC_STS_TIMEOUT;
1769         } else if (rc == -EINTR) {
1770                 BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
1771                 sts = BC_STS_IO_USER_ABORT;
1772         } else {
1773                 BCMLOG_ERR("FwCmd IO Error.\n");
1774                 sts = BC_STS_IO_ERROR;
1775         }
1776
1777         if (sts != BC_STS_SUCCESS) {
1778                 BCMLOG_ERR("FwCmd Failed.\n");
1779                 hw->pwr_lock--;
1780                 return sts;
1781         }
1782
1783         /*Get the Response Address*/
1784         cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
1785
1786         /*Read the Response*/
1787         crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
1788
1789         hw->pwr_lock--;
1790
1791         if (res_buff[2] != C011_RET_SUCCESS) {
1792                 BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1793                 return BC_STS_FW_CMD_ERR;
1794         }
1795
1796         sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
1797         if (sts != BC_STS_SUCCESS)
1798                 BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
1799
1800         return sts;
1801 }
1802
1803 bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
1804 {
1805         uint32_t intr_sts = 0;
1806         uint32_t deco_intr = 0;
1807         bool rc = false;
1808
1809         if (!adp || !hw->dev_started)
1810                 return rc;
1811
1812         hw->stats.num_interrupts++;
1813         hw->pwr_lock++;
1814
1815         deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
1816         intr_sts  = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
1817
1818         if (intr_sts) {
1819                 /* let system know we processed interrupt..*/
1820                 rc = true;
1821                 hw->stats.dev_interrupts++;
1822         }
1823
1824         if (deco_intr && (deco_intr != 0xdeaddead)) {
1825
1826                 if (deco_intr & 0x80000000) {
1827                         /*Set the Event and the status flag*/
1828                         if (hw->pfw_cmd_event) {
1829                                 hw->fwcmd_evt_sts = 1;
1830                                 crystalhd_set_event(hw->pfw_cmd_event);
1831                         }
1832                 }
1833
1834                 if (deco_intr & BC_BIT(1))
1835                         crystalhd_hw_proc_pib(hw);
1836
1837                 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
1838                 /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
1839                 bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
1840                 rc = true;
1841         }
1842
1843         /* Rx interrupts */
1844         crystalhd_rx_isr(hw, intr_sts);
1845
1846         /* Tx interrupts*/
1847         crystalhd_tx_isr(hw, intr_sts);
1848
1849         /* Clear interrupts */
1850         if (rc) {
1851                 if (intr_sts)
1852                         crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
1853
1854                 crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
1855         }
1856
1857         hw->pwr_lock--;
1858
1859         return rc;
1860 }
1861
1862 BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
1863 {
1864         if (!hw || !adp) {
1865                 BCMLOG_ERR("Invalid Arguments\n");
1866                 return BC_STS_INV_ARG;
1867         }
1868
1869         if (hw->dev_started)
1870                 return BC_STS_SUCCESS;
1871
1872         memset(hw, 0, sizeof(struct crystalhd_hw));
1873
1874         hw->adp = adp;
1875         spin_lock_init(&hw->lock);
1876         spin_lock_init(&hw->rx_lock);
1877         /* FIXME: jarod: what are these magic numbers?!? */
1878         hw->tx_ioq_tag_seed = 0x70023070;
1879         hw->rx_pkt_tag_seed = 0x70029070;
1880
1881         hw->stop_pending = 0;
1882         crystalhd_start_device(hw->adp);
1883         hw->dev_started = true;
1884
1885         /* set initial core clock  */
1886         hw->core_clock_mhz = CLOCK_PRESET;
1887         hw->prev_n = 0;
1888         hw->pwr_lock = 0;
1889         crystalhd_hw_set_core_clock(hw);
1890
1891         return BC_STS_SUCCESS;
1892 }
1893
1894 BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1895 {
1896         if (!hw) {
1897                 BCMLOG_ERR("Invalid Arguments\n");
1898                 return BC_STS_INV_ARG;
1899         }
1900
1901         if (!hw->dev_started)
1902                 return BC_STS_SUCCESS;
1903
1904         /* Stop and DDR sleep will happen in here */
1905         crystalhd_hw_suspend(hw);
1906         hw->dev_started = false;
1907
1908         return BC_STS_SUCCESS;
1909 }
1910
1911 BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1912 {
1913         unsigned int i;
1914         void *mem;
1915         size_t mem_len;
1916         dma_addr_t phy_addr;
1917         BC_STATUS sts = BC_STS_SUCCESS;
1918         crystalhd_rx_dma_pkt *rpkt;
1919
1920         if (!hw || !hw->adp) {
1921                 BCMLOG_ERR("Invalid Arguments\n");
1922                 return BC_STS_INV_ARG;
1923         }
1924
1925         sts = crystalhd_hw_create_ioqs(hw);
1926         if (sts != BC_STS_SUCCESS) {
1927                 BCMLOG_ERR("Failed to create IOQs..\n");
1928                 return sts;
1929         }
1930
1931         mem_len = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
1932
1933         for (i = 0; i < BC_TX_LIST_CNT; i++) {
1934                 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1935                 if (mem) {
1936                         memset(mem, 0, mem_len);
1937                 } else {
1938                         BCMLOG_ERR("Insufficient Memory For TX\n");
1939                         crystalhd_hw_free_dma_rings(hw);
1940                         return BC_STS_INSUFF_RES;
1941                 }
1942                 /* tx_pkt_pool -- static memory allocation */
1943                 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1944                 hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1945                 hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1946                                                  sizeof(dma_descriptor);
1947                 hw->tx_pkt_pool[i].list_tag = 0;
1948
1949                 /* Add TX dma requests to Free Queue..*/
1950                 sts = crystalhd_dioq_add(hw->tx_freeq,
1951                                        &hw->tx_pkt_pool[i], false, 0);
1952                 if (sts != BC_STS_SUCCESS) {
1953                         crystalhd_hw_free_dma_rings(hw);
1954                         return sts;
1955                 }
1956         }
1957
1958         for (i = 0; i < BC_RX_LIST_CNT; i++) {
1959                 rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
1960                 if (!rpkt) {
1961                         BCMLOG_ERR("Insufficient Memory For RX\n");
1962                         crystalhd_hw_free_dma_rings(hw);
1963                         return BC_STS_INSUFF_RES;
1964                 }
1965
1966                 mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1967                 if (mem) {
1968                         memset(mem, 0, mem_len);
1969                 } else {
1970                         BCMLOG_ERR("Insufficient Memory For RX\n");
1971                         crystalhd_hw_free_dma_rings(hw);
1972                         return BC_STS_INSUFF_RES;
1973                 }
1974                 rpkt->desc_mem.pdma_desc_start = mem;
1975                 rpkt->desc_mem.phy_addr = phy_addr;
1976                 rpkt->desc_mem.sz  = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
1977                 rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
1978                 crystalhd_hw_free_rx_pkt(hw, rpkt);
1979         }
1980
1981         return BC_STS_SUCCESS;
1982 }
1983
1984 BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
1985 {
1986         unsigned int i;
1987         crystalhd_rx_dma_pkt *rpkt = NULL;
1988
1989         if (!hw || !hw->adp) {
1990                 BCMLOG_ERR("Invalid Arguments\n");
1991                 return BC_STS_INV_ARG;
1992         }
1993
1994         /* Delete all IOQs.. */
1995         crystalhd_hw_delete_ioqs(hw);
1996
1997         for (i = 0; i < BC_TX_LIST_CNT; i++) {
1998                 if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
1999                         bc_kern_dma_free(hw->adp,
2000                                 hw->tx_pkt_pool[i].desc_mem.sz,
2001                                 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
2002                                 hw->tx_pkt_pool[i].desc_mem.phy_addr);
2003
2004                         hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2005                 }
2006         }
2007
2008         BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2009         do {
2010                 rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2011                 if (!rpkt)
2012                         break;
2013                 bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2014                                  rpkt->desc_mem.pdma_desc_start,
2015                                  rpkt->desc_mem.phy_addr);
2016                 kfree(rpkt);
2017         } while (rpkt);
2018
2019         return BC_STS_SUCCESS;
2020 }
2021
2022 BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
2023                              hw_comp_callback call_back,
2024                              wait_queue_head_t *cb_event, uint32_t *list_id,
2025                              uint8_t data_flags)
2026 {
2027         tx_dma_pkt *tx_dma_packet = NULL;
2028         uint32_t first_desc_u_addr, first_desc_l_addr;
2029         uint32_t low_addr, high_addr;
2030         addr_64 desc_addr;
2031         BC_STATUS sts, add_sts;
2032         uint32_t dummy_index = 0;
2033         unsigned long flags;
2034         bool rc;
2035
2036         if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
2037                 BCMLOG_ERR("Invalid Arguments\n");
2038                 return BC_STS_INV_ARG;
2039         }
2040
2041         /*
2042          * Since we hit code in busy condition very frequently,
2043          * we will check the code in status first before
2044          * checking the availability of free elem.
2045          *
2046          * This will avoid the Q fetch/add in normal condition.
2047          */
2048         rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
2049                                   false, data_flags);
2050         if (rc) {
2051                 hw->stats.cin_busy++;
2052                 return BC_STS_BUSY;
2053         }
2054
2055         /* Get a list from TxFreeQ */
2056         tx_dma_packet = (tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
2057         if (!tx_dma_packet) {
2058                 BCMLOG_ERR("No empty elements..\n");
2059                 return BC_STS_ERR_USAGE;
2060         }
2061
2062         sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
2063                                            &tx_dma_packet->desc_mem,
2064                                            &dummy_index);
2065         if (sts != BC_STS_SUCCESS) {
2066                 add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
2067                                            false, 0);
2068                 if (add_sts != BC_STS_SUCCESS)
2069                         BCMLOG_ERR("double fault..\n");
2070
2071                 return sts;
2072         }
2073
2074         hw->pwr_lock++;
2075
2076         desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
2077         low_addr = desc_addr.low_part;
2078         high_addr = desc_addr.high_part;
2079
2080         tx_dma_packet->call_back = call_back;
2081         tx_dma_packet->cb_event  = cb_event;
2082         tx_dma_packet->dio_req   = ioreq;
2083
2084         spin_lock_irqsave(&hw->lock, flags);
2085
2086         if (hw->tx_list_post_index == 0) {
2087                 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
2088                 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
2089         } else {
2090                 first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
2091                 first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
2092         }
2093
2094         *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
2095                                              hw->tx_list_post_index;
2096
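             /* Advance to the other TX DMA list (modulo DMA_ENGINE_CNT) for the next post. */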
2097         hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
2098
2099         spin_unlock_irqrestore(&hw->lock, flags);
2100
2101
2102         /* Insert in Active Q..*/
2103         crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
2104                          tx_dma_packet->list_tag);
2105
2106         /*
2107          * Interrupt will come as soon as you write
2108          * the valid bit. So be ready for that. All
2109          * the initialization should happen before that.
2110          */
2111         crystalhd_start_tx_dma_engine(hw);
2112         crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
2113
2114         crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
2115                                         /* Be sure we set the valid bit ^^^^ */
2116
2117         return BC_STS_SUCCESS;
2118 }
2119
2120 /*
2121  * This is a force cancel and we are racing with ISR.
2122  *
2123  * Will try to remove the req from ActQ before ISR gets it.
2124  * If ISR gets it first then the completion happens in the
2125  * normal path and we will return _STS_NO_DATA from here.
2126  *
2127  * FIXME: The actual race condition has not been tested.
2128  */
2129 BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
2130 {
2131         if (!hw || !list_id) {
2132                 BCMLOG_ERR("Invalid Arguments\n");
2133                 return BC_STS_INV_ARG;
2134         }
2135
2136         crystalhd_stop_tx_dma_engine(hw);
2137         crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2138
2139         return BC_STS_SUCCESS;
2140 }
2141
2142 BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2143                                     crystalhd_dio_req *ioreq, bool en_post)
2144 {
2145         crystalhd_rx_dma_pkt *rpkt;
2146         uint32_t tag, uv_desc_ix = 0;
2147         BC_STATUS sts;
2148
2149         if (!hw || !ioreq) {
2150                 BCMLOG_ERR("Invalid Arguments\n");
2151                 return BC_STS_INV_ARG;
2152         }
2153
2154         rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2155         if (!rpkt) {
2156                 BCMLOG_ERR("Insufficient resources\n");
2157                 return BC_STS_INSUFF_RES;
2158         }
2159
2160         rpkt->dio_req = ioreq;
2161         tag = rpkt->pkt_tag;
2162
2163         sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
2164         if (sts != BC_STS_SUCCESS)
2165                 return sts;
2166
2167         rpkt->uv_phy_addr = 0;
2168
2169         /* Store the address of UV in the rx packet for post*/
2170         if (uv_desc_ix)
2171                 rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2172                                     (sizeof(dma_descriptor) * (uv_desc_ix + 1));
2173
2174         if (en_post)
2175                 sts = crystalhd_hw_post_cap_buff(hw, rpkt);
2176         else
2177                 sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
2178
2179         return sts;
2180 }
2181
2182 BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2183                                     BC_PIC_INFO_BLOCK *pib,
2184                                     crystalhd_dio_req **ioreq)
2185 {
2186         crystalhd_rx_dma_pkt *rpkt;
2187         uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2188         uint32_t sig_pending = 0;
2189
2190
2191         if (!hw || !ioreq || !pib) {
2192                 BCMLOG_ERR("Invalid Arguments\n");
2193                 return BC_STS_INV_ARG;
2194         }
2195
2196         rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2197         if (!rpkt) {
2198                 if (sig_pending) {
2199                         BCMLOG(BCMLOG_INFO, "wait on frame interrupted by signal %d\n", sig_pending);
2200                         return BC_STS_IO_USER_ABORT;
2201                 } else {
2202                         return BC_STS_TIMEOUT;
2203                 }
2204         }
2205
2206         rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2207
2208         if (rpkt->flags & COMP_FLAG_PIB_VALID)
2209                 memcpy(pib, &rpkt->pib, sizeof(*pib));
2210
2211         *ioreq = rpkt->dio_req;
2212
2213         crystalhd_hw_free_rx_pkt(hw, rpkt);
2214
2215         return BC_STS_SUCCESS;
2216 }
2217
2218 BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2219 {
2220         crystalhd_rx_dma_pkt *rx_pkt;
2221         BC_STATUS sts;
2222         uint32_t i;
2223
2224         if (!hw) {
2225                 BCMLOG_ERR("Invalid Arguments\n");
2226                 return BC_STS_INV_ARG;
2227         }
2228
2229         /* This is the start of capture: post to both lists. */
2230         for (i = 0; i < DMA_ENGINE_CNT; i++) {
2231                 rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2232                 if (!rx_pkt)
2233                         return BC_STS_NO_DATA;
2234                 sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2235                 if (BC_STS_SUCCESS != sts)
2236                         break;
2237
2238         }
2239
2240         return BC_STS_SUCCESS;
2241 }
2242
2243 BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2244 {
2245         void *temp = NULL;
2246
2247         if (!hw) {
2248                 BCMLOG_ERR("Invalid Arguments\n");
2249                 return BC_STS_INV_ARG;
2250         }
2251
2252         crystalhd_stop_rx_dma_engine(hw);
2253
2254         do {
2255                 temp = crystalhd_dioq_fetch(hw->rx_freeq);
2256                 if (temp)
2257                         crystalhd_rx_pkt_rel_call_back(hw, temp);
2258         } while (temp);
2259
2260         return BC_STS_SUCCESS;
2261 }
2262
2263 BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2264 {
2265         hw->stats.pause_cnt++;
2266         hw->stop_pending = 1;
2267
2268         if ((hw->rx_list_sts[0] == sts_free) &&
2269             (hw->rx_list_sts[1] == sts_free))
2270                 crystalhd_hw_finalize_pause(hw);
2271
2272         return BC_STS_SUCCESS;
2273 }
2274
2275 BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2276 {
2277         BC_STATUS sts;
2278         uint32_t aspm;
2279
2280         hw->stop_pending = 0;
2281
2282         aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2283         aspm &= ~ASPM_L1_ENABLE;
2284 /* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2285         crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2286
2287         sts = crystalhd_hw_start_capture(hw);
2288         return sts;
2289 }
2290
2291 BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2292 {
2293         BC_STATUS sts;
2294
2295         if (!hw) {
2296                 BCMLOG_ERR("Invalid Arguments\n");
2297                 return BC_STS_INV_ARG;
2298         }
2299
2300         sts = crystalhd_put_ddr2sleep(hw);
2301         if (sts != BC_STS_SUCCESS) {
2302                 BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2303                 return BC_STS_ERROR;
2304         }
2305
2306         if (!crystalhd_stop_device(hw->adp)) {
2307                 BCMLOG_ERR("Failed to Stop Device!!\n");
2308                 return BC_STS_ERROR;
2309         }
2310
2311         return BC_STS_SUCCESS;
2312 }
2313
2314 void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
2315 {
2316         if (!hw) {
2317                 BCMLOG_ERR("Invalid Arguments\n");
2318                 return;
2319         }
2320
2321         /* if called w/NULL stats, its a req to zero out the stats */
2322         if (!stats) {
2323                 memset(&hw->stats, 0, sizeof(hw->stats));
2324                 return;
2325         }
2326
2327         hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2328         hw->stats.rdyq_count  = crystalhd_dioq_count(hw->rx_rdyq);
2329         memcpy(stats, &hw->stats, sizeof(*stats));
2330 }
2331
2332 BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2333 {
2334         uint32_t reg, n, i;
2335         uint32_t vco_mg, refresh_reg;
2336
2337         if (!hw) {
2338                 BCMLOG_ERR("Invalid Arguments\n");
2339                 return BC_STS_INV_ARG;
2340         }
2341
2342         /* FIXME: jarod: wha? */
2343         /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2344         n = hw->core_clock_mhz/5;
2345
2346         if (n == hw->prev_n)
2347                 return BC_STS_CLK_NOCHG;
2348
2349         if (hw->pwr_lock > 0) {
2350                 /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2351                 return BC_STS_CLK_NOCHG;
2352         }
2353
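             /* Derive the VCO range setting from n * 27 (presumably the VCO frequency in MHz with a 27MHz reference). */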
2354         i = n * 27;
2355         if (i < 560)
2356                 vco_mg = 0;
2357         else if (i < 900)
2358                 vco_mg = 1;
2359         else if (i < 1030)
2360                 vco_mg = 2;
2361         else
2362                 vco_mg = 3;
2363
2364         reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2365
2366         reg &= 0xFFFFCFC0;
2367         reg |= n;
2368         reg |= vco_mg << 12;
2369
2370         BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2371                hw->core_clock_mhz, n, vco_mg);
2372
2373         /* Change the DRAM refresh rate to accommodate the new frequency */
2374         /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2375         refresh_reg = (7 * hw->core_clock_mhz / 16);
2376         bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2377
2378         bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
2379
2380         i = 0;
2381
2382         for (i = 0; i < 10; i++) {
2383                 reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2384
2385                 if (reg & 0x00020000) {
2386                         hw->prev_n = n;
2387                         /* FIXME: jarod: outputting a random "C" is... confusing... */
2388                         BCMLOG(BCMLOG_INFO, "C");
2389                         return BC_STS_SUCCESS;
2390                 } else {
2391                         msleep_interruptible(10);
2392                 }
2393         }
2394         BCMLOG(BCMLOG_INFO, "clk change failed\n");
2395         return BC_STS_CLK_NOCHG;
2396 }