/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 *
 */
84
85 #include <linux/bitops.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
102
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
105 #include <asm/irq.h>
106 #include <linux/types.h>
107 #include <linux/vmalloc.h>
108 #include <linux/init.h>
109
110 #ifdef CONFIG_PPC_PMAC
111 #include <asm/machdep.h>
112 #include <asm/pmac_feature.h>
113 #include <asm/prom.h>
114 #include <asm/pci-bridge.h>
115 #endif
116
117 #include "csr1212.h"
118 #include "ieee1394.h"
119 #include "ieee1394_types.h"
120 #include "hosts.h"
121 #include "dma.h"
122 #include "iso.h"
123 #include "ieee1394_core.h"
124 #include "highlevel.h"
125 #include "ohci1394.h"
126
127 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
128 #define OHCI1394_DEBUG
129 #endif
130
131 #ifdef DBGMSG
132 #undef DBGMSG
133 #endif
134
135 #ifdef OHCI1394_DEBUG
136 #define DBGMSG(fmt, args...) \
137 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
138 #else
139 #define DBGMSG(fmt, args...) do {} while (0)
140 #endif
141
142 /* print general (card independent) information */
143 #define PRINT_G(level, fmt, args...) \
144 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
145
146 /* print card specific information */
147 #define PRINT(level, fmt, args...) \
148 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
149
150 /* Module Parameters */
151 static int phys_dma = 1;
152 module_param(phys_dma, int, 0444);
153 MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
154
155 static void dma_trm_tasklet(unsigned long data);
156 static void dma_trm_reset(struct dma_trm_ctx *d);
157
158 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
159                              enum context_type type, int ctx, int num_desc,
160                              int buf_size, int split_buf_size, int context_base);
161 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
162
163 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
164                              enum context_type type, int ctx, int num_desc,
165                              int context_base);
166
167 static void ohci1394_pci_remove(struct pci_dev *pdev);
168
169 #ifndef __LITTLE_ENDIAN
170 static const size_t hdr_sizes[] = {
171         3,      /* TCODE_WRITEQ */
172         4,      /* TCODE_WRITEB */
173         3,      /* TCODE_WRITE_RESPONSE */
174         0,      /* reserved */
175         3,      /* TCODE_READQ */
176         4,      /* TCODE_READB */
177         3,      /* TCODE_READQ_RESPONSE */
178         4,      /* TCODE_READB_RESPONSE */
179         1,      /* TCODE_CYCLE_START */
180         4,      /* TCODE_LOCK_REQUEST */
181         2,      /* TCODE_ISO_DATA */
182         4,      /* TCODE_LOCK_RESPONSE */
183                 /* rest is reserved or link-internal */
184 };
185
186 static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
187 {
188         size_t size;
189
190         if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
191                 return;
192
193         size = hdr_sizes[tcode];
194         while (size--)
195                 data[size] = le32_to_cpu(data[size]);
196 }
197 #else
198 #define header_le32_to_cpu(w,x) do {} while (0)
199 #endif /* !LITTLE_ENDIAN */
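
/*
 * Worked example for the table above: a quadlet read request has
 * tcode = TCODE_READQ = 0x4, so hdr_sizes[4] = 3 and, on a big-endian
 * host, header_le32_to_cpu() byte-swaps data[0]..data[2]; a block read
 * (TCODE_READB = 0x5) swaps four quadlets.  On little-endian hosts the
 * macro compiles away entirely.
 */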

/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
        int i;
        unsigned long flags;
        quadlet_t r;

        spin_lock_irqsave (&ohci->phy_reg_lock, flags);

        reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
                        break;

                mdelay(1);
        }

        r = reg_read(ohci, OHCI1394_PhyControl);

        if (i >= OHCI_LOOP_COUNT)
                PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
                       r, r & 0x80000000, i);

        spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

        return (r & 0x00ff0000) >> 16;
}
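
/*
 * PhyControl register layout as used here and in set_phy_reg() below
 * (OHCI-1394 spec, section 5.11):
 *   bits 0-7   wrData  - value to write to the PHY register
 *   bits 8-11  regAddr - PHY register address (hence "addr << 8")
 *   bit  14    wrReg   - 0x00004000, starts a write, cleared when done
 *   bit  15    rdReg   - 0x00008000, starts a read
 *   bits 16-23 rdData  - value returned by the PHY, extracted above
 *   bit  31    rdDone  - 0x80000000, set once rdData is valid
 */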

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
        int i;
        unsigned long flags;
        u32 r = 0;

        spin_lock_irqsave (&ohci->phy_reg_lock, flags);

        reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                r = reg_read(ohci, OHCI1394_PhyControl);
                if (!(r & 0x00004000))
                        break;

                mdelay(1);
        }

        if (i == OHCI_LOOP_COUNT)
                PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
                       r, r & 0x00004000, i);

        spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

        return;
}

/* ORs our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
        u8 old;

        old = get_phy_reg (ohci, addr);
        old |= data;
        set_phy_reg (ohci, addr, old);

        return;
}

static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
                                int phyid, int isroot)
{
        quadlet_t *q = ohci->selfid_buf_cpu;
        quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
        size_t size;
        quadlet_t q0, q1;

        /* Check status of self-id reception */

        if (ohci->selfid_swap)
                q0 = le32_to_cpu(q[0]);
        else
                q0 = q[0];

        if ((self_id_count & 0x80000000) ||
            ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
                PRINT(KERN_ERR,
                      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
                      self_id_count, q0, ohci->self_id_errors);

                /* Tip by James Goodwin <jamesg@Filanet.com>:
                 * We had an error, generate another bus reset in response.  */
                if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
                        set_phy_reg_mask (ohci, 1, 0x40);
                        ohci->self_id_errors++;
                } else {
                        PRINT(KERN_ERR,
                              "Too many SelfID reception errors, giving up!");
                }
                return;
        }

        /* SelfID Ok, reset error counter. */
        ohci->self_id_errors = 0;

        size = ((self_id_count & 0x00001FFC) >> 2) - 1;
        q++;

        while (size > 0) {
                if (ohci->selfid_swap) {
                        q0 = le32_to_cpu(q[0]);
                        q1 = le32_to_cpu(q[1]);
                } else {
                        q0 = q[0];
                        q1 = q[1];
                }

                if (q0 == ~q1) {
                        DBGMSG ("SelfID packet 0x%x received", q0);
                        hpsb_selfid_received(host, cpu_to_be32(q0));
                        if (((q0 & 0x3f000000) >> 24) == phyid)
                                DBGMSG ("SelfID for this node is 0x%08x", q0);
                } else {
                        PRINT(KERN_ERR,
                              "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
                }
                q += 2;
                size -= 2;
        }

        DBGMSG("SelfID complete");

        return;
}
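
/*
 * The consistency check above relies on the IEEE 1394 wire format: the
 * PHY sends each self-ID quadlet followed by its bitwise complement, so
 * a valid pair satisfies q0 == ~q1.  Bits 24-29 of the quadlet carry
 * the phy_ID, which is why (q0 & 0x3f000000) >> 24 is compared against
 * our own phyid above.
 */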

static void ohci_soft_reset(struct ti_ohci *ohci)
{
        int i;

        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
                        break;
                mdelay(1);
        }
        DBGMSG ("Soft reset finished");
}


/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
        struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
        int i;

        ohci1394_stop_context(ohci, d->ctrlClear, NULL);

        for (i=0; i<d->num_desc; i++) {
                u32 c;

                c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
                if (generate_irq)
                        c |= DMA_CTL_IRQ;

                d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

                /* End of descriptor list? */
                if (i + 1 < d->num_desc) {
                        d->prg_cpu[i]->branchAddress =
                                cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
                } else {
                        d->prg_cpu[i]->branchAddress =
                                cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
                }

                d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
                d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
        }

        d->buf_ind = 0;
        d->buf_offset = 0;

        if (d->type == DMA_CTX_ISO) {
                /* Clear contextControl */
                reg_write(ohci, d->ctrlClear, 0xffffffff);

                /* Set bufferFill, isochHeader, multichannel for IR context */
                reg_write(ohci, d->ctrlSet, 0xd0000000);

                /* Set the context match register to match on all tags */
                reg_write(ohci, d->ctxtMatch, 0xf0000000);

                /* Clear the multi channel mask high and low registers */
                reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
                reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

                /* Set up isoRecvIntMask to generate interrupts */
                reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
        }

        /* Tell the controller where the first receive program is */
        reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

        /* Run context */
        reg_write(ohci, d->ctrlSet, 0x00008000);

        DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
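
/*
 * The program built above is a ring of INPUT_MORE descriptors: each one
 * points at a receive buffer and branches to the next with Z=1 (the low
 * bit of branchAddress).  The final descriptor points back at descriptor
 * 0 but with Z=0, so the context stalls there rather than overwriting
 * buffers that have not been processed yet; the receive tasklet re-arms
 * the branch once a buffer has been consumed.
 */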

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
        struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

        /* Stop the context */
        ohci1394_stop_context(ohci, d->ctrlClear, NULL);

        d->prg_ind = 0;
        d->sent_ind = 0;
        d->free_prgs = d->num_desc;
        d->branchAddrPtr = NULL;
        INIT_LIST_HEAD(&d->fifo_list);
        INIT_LIST_HEAD(&d->pending_list);

        if (d->type == DMA_CTX_ISO) {
                /* enable interrupts */
                reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
        }

        DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
        u32 tmp;

        reg_write(ohci, reg, 0xffffffff);
        tmp = reg_read(ohci, reg);

        DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

        /* Count the number of contexts */
        return hweight32(tmp);
}
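
/*
 * Example: writing 0xffffffff to an iso interrupt mask "set" register
 * only latches the bits backed by implemented contexts, so a controller
 * with four IR contexts reads back 0x0000000f and hweight32() returns 4.
 * The mask bits left set here are cleared again in ohci_initialize().
 */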

/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
        quadlet_t buf;
        int num_ports, i;

        spin_lock_init(&ohci->phy_reg_lock);

        /* Put some defaults to these undefined bus options */
        buf = reg_read(ohci, OHCI1394_BusOptions);
        buf |=  0x60000000; /* Enable CMC and ISC */
        if (hpsb_disable_irm)
                buf &= ~0x80000000;
        else
                buf |=  0x80000000; /* Enable IRMC */
        buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
        buf &= ~0x18000000; /* Disable PMC and BMC */
        reg_write(ohci, OHCI1394_BusOptions, buf);

        /* Set the bus number */
        reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

        /* Enable posted writes */
        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

        /* Clear link control register */
        reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

        /* Enable cycle timer and cycle master and set the IRM
         * contender bit in our self ID packets if appropriate. */
        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_CycleTimerEnable |
                  OHCI1394_LinkControl_CycleMaster);
        i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
        if (hpsb_disable_irm)
                i &= ~PHY_04_CONTENDER;
        else
                i |= PHY_04_CONTENDER;
        set_phy_reg(ohci, 4, i);

        /* Set up self-id dma buffer */
        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

        /* enable self-id */
        reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

        /* Set the Config ROM mapping register */
        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

        /* Now get our max packet size */
        ohci->max_packet_size =
                1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

        /* Clear the iso receive interrupt mask and pending events */
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
        reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

        /* Clear the iso transmit interrupt mask and pending events */
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
        reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

        /* Initialize AR dma */
        initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
        initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

        /* Initialize AT dma */
        initialize_dma_trm_ctx(&ohci->at_req_context);
        initialize_dma_trm_ctx(&ohci->at_resp_context);

        /* Accept AR requests from all nodes */
        reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

        /* Set the address range of the physical response unit.
         * Most controllers do not implement it as a writable register though.
         * They will keep a hardwired offset of 0x00010000 and show 0x0 as
         * register content.
         * To actually enable physical responses is the job of our interrupt
         * handler which programs the physical request filter. */
        reg_write(ohci, OHCI1394_PhyUpperBound,
                  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

        DBGMSG("physUpperBoundOffset=%08x",
               reg_read(ohci, OHCI1394_PhyUpperBound));

        /* Specify AT retries */
        reg_write(ohci, OHCI1394_ATRetries,
                  OHCI1394_MAX_AT_REQ_RETRIES |
                  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
                  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

        /* We don't want hardware swapping */
        reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

        /* Enable interrupts */
        reg_write(ohci, OHCI1394_IntMaskSet,
                  OHCI1394_unrecoverableError |
                  OHCI1394_masterIntEnable |
                  OHCI1394_busReset |
                  OHCI1394_selfIDComplete |
                  OHCI1394_RSPkt |
                  OHCI1394_RQPkt |
                  OHCI1394_respTxComplete |
                  OHCI1394_reqTxComplete |
                  OHCI1394_isochRx |
                  OHCI1394_isochTx |
                  OHCI1394_postedWriteErr |
                  OHCI1394_cycleTooLong |
                  OHCI1394_cycleInconsistent);

        /* Enable link */
        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

        buf = reg_read(ohci, OHCI1394_Version);
        PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d]  "
              "MMIO=[%llx-%llx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
              ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
              ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
              (unsigned long long)pci_resource_start(ohci->dev, 0),
              (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
              ohci->max_packet_size,
              ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

        /* Check all of our ports to make sure that if anything is
         * connected, we enable that port. */
        num_ports = get_phy_reg(ohci, 2) & 0xf;
        for (i = 0; i < num_ports; i++) {
                unsigned int status;

                set_phy_reg(ohci, 7, i);
                status = get_phy_reg(ohci, 8);

                if (status & 0x20)
                        set_phy_reg(ohci, 8, status & ~1);
        }

        /* Serial EEPROM Sanity check. */
        if ((ohci->max_packet_size < 512) ||
            (ohci->max_packet_size > 4096)) {
                /* Serial EEPROM contents are suspect, set a sane max packet
                 * size and print the raw contents for bug reports if verbose
                 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
                int i;
#endif

                PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to set max_packet_size to 512 bytes");
                reg_write(ohci, OHCI1394_BusOptions,
                          (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
                ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
                PRINT(KERN_DEBUG, "    EEPROM Present: %d",
                      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
                reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

                for (i = 0;
                     ((i < 1000) &&
                      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
                        udelay(10);

                for (i = 0; i < 0x20; i++) {
                        reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
                        PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
                              (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
                }
#endif
        }
}

/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
                          struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
        u32 cycleTimer;
        int idx = d->prg_ind;

        DBGMSG("Inserting packet for node " NODE_BUS_FMT
               ", tlabel=%d, tcode=0x%x, speed=%d",
               NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
               packet->tcode, packet->speed_code);

        d->prg_cpu[idx]->begin.address = 0;
        d->prg_cpu[idx]->begin.branchAddress = 0;

        if (d->type == DMA_CTX_ASYNC_RESP) {
                /*
                 * For response packets, we need to put a timeout value in
                 * the 16 lower bits of the status... let's try 1 sec timeout
                 */
                cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                d->prg_cpu[idx]->begin.status = cpu_to_le32(
                        (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
                        ((cycleTimer&0x01fff000)>>12));

                DBGMSG("cycleTimer: %08x timeStamp: %08x",
                       cycleTimer, d->prg_cpu[idx]->begin.status);
        } else
                d->prg_cpu[idx]->begin.status = 0;

        if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

                if (packet->type == hpsb_raw) {
                        d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
                        d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
                        d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
                } else {
                        d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                                (packet->header[0] & 0xFFFF);

                        if (packet->tcode == TCODE_ISO_DATA) {
                                /* Sending an async stream packet */
                                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
                        } else {
                                /* Sending a normal async request or response */
                                d->prg_cpu[idx]->data[1] =
                                        (packet->header[1] & 0xFFFF) |
                                        (packet->header[0] & 0xFFFF0000);
                                d->prg_cpu[idx]->data[2] = packet->header[2];
                                d->prg_cpu[idx]->data[3] = packet->header[3];
                        }
                        header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
                }

                if (packet->data_size) { /* block transmit */
                        if (packet->tcode == TCODE_STREAM_DATA){
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                                    DMA_CTL_IMMEDIATE | 0x8);
                        } else {
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                                    DMA_CTL_IMMEDIATE | 0x10);
                        }
                        d->prg_cpu[idx]->end.control =
                                cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                            DMA_CTL_IRQ |
                                            DMA_CTL_BRANCH |
                                            packet->data_size);
                        /*
                         * Check that the packet data buffer
                         * does not cross a page boundary.
                         *
                         * XXX Fix this some day. eth1394 seems to trigger
                         * it, but ignoring it doesn't seem to cause a
                         * problem.
                         */
#if 0
                        if (cross_bound((unsigned long)packet->data,
                                        packet->data_size)>0) {
                                /* FIXME: do something about it */
                                PRINT(KERN_ERR,
                                      "%s: packet data addr: %p size %Zd bytes "
                                      "cross page boundary", __func__,
                                      packet->data, packet->data_size);
                        }
#endif
                        d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                               packet->data_size,
                                               PCI_DMA_TODEVICE));

                        d->prg_cpu[idx]->end.branchAddress = 0;
                        d->prg_cpu[idx]->end.status = 0;
                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
                                        cpu_to_le32(d->prg_bus[idx] | 0x3);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->end.branchAddress);
                } else { /* quadlet transmit */
                        if (packet->type == hpsb_raw)
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                                    DMA_CTL_IMMEDIATE |
                                                    DMA_CTL_IRQ |
                                                    DMA_CTL_BRANCH |
                                                    (packet->header_size + 4));
                        else
                                d->prg_cpu[idx]->begin.control =
                                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                                    DMA_CTL_IMMEDIATE |
                                                    DMA_CTL_IRQ |
                                                    DMA_CTL_BRANCH |
                                                    packet->header_size);

                        if (d->branchAddrPtr)
                                *(d->branchAddrPtr) =
                                        cpu_to_le32(d->prg_bus[idx] | 0x2);
                        d->branchAddrPtr =
                                &(d->prg_cpu[idx]->begin.branchAddress);
                }

        } else { /* iso packet */
                d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
                        (packet->header[0] & 0xFFFF);
                d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
                header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

                d->prg_cpu[idx]->begin.control =
                        cpu_to_le32(DMA_CTL_OUTPUT_MORE |
                                    DMA_CTL_IMMEDIATE | 0x8);
                d->prg_cpu[idx]->end.control =
                        cpu_to_le32(DMA_CTL_OUTPUT_LAST |
                                    DMA_CTL_UPDATE |
                                    DMA_CTL_IRQ |
                                    DMA_CTL_BRANCH |
                                    packet->data_size);
                d->prg_cpu[idx]->end.address = cpu_to_le32(
                                pci_map_single(ohci->dev, packet->data,
                                packet->data_size, PCI_DMA_TODEVICE));

                d->prg_cpu[idx]->end.branchAddress = 0;
                d->prg_cpu[idx]->end.status = 0;
                DBGMSG("Iso xmit context info: header[%08x %08x]\n"
                       "                       begin=%08x %08x %08x %08x\n"
                       "                             %08x %08x %08x %08x\n"
                       "                       end  =%08x %08x %08x %08x",
                       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->begin.control,
                       d->prg_cpu[idx]->begin.address,
                       d->prg_cpu[idx]->begin.branchAddress,
                       d->prg_cpu[idx]->begin.status,
                       d->prg_cpu[idx]->data[0],
                       d->prg_cpu[idx]->data[1],
                       d->prg_cpu[idx]->data[2],
                       d->prg_cpu[idx]->data[3],
                       d->prg_cpu[idx]->end.control,
                       d->prg_cpu[idx]->end.address,
                       d->prg_cpu[idx]->end.branchAddress,
                       d->prg_cpu[idx]->end.status);
                if (d->branchAddrPtr)
                        *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
                d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
        }
        d->free_prgs--;

        /* queue the packet in the appropriate context queue */
        list_add_tail(&packet->driver_list, &d->fifo_list);
        d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
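
/*
 * Worked example for the response timeout computed above: the
 * IsochronousCycleTimer register keeps cycleSeconds in bits 25-31 and
 * cycleCount in bits 12-24.  If the timer reads seconds=2, cycle=1000,
 * the timeStamp written to begin.status is ((2+1)&7)<<13 | 1000, i.e.
 * the same cycle number one second in the future; a response not acked
 * by then is timed out by the controller.
 */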

/*
 * This function fills the FIFO with any pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
        struct hpsb_packet *packet, *ptmp;
        int idx = d->prg_ind;
        int z = 0;

        /* insert the packets into the dma fifo */
        list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
                if (!d->free_prgs)
                        break;

                /* For the first packet only */
                if (!z)
                        z = (packet->data_size) ? 3 : 2;

                /* Insert the packet */
                list_del_init(&packet->driver_list);
                insert_packet(ohci, d, packet);
        }

        /* Nothing was done; either no free prgs or no pending packets */
        if (z == 0)
                return;

        /* Is the context running ? (should be unless it is
           the first packet to be sent in this context) */
        if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
                u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

                DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
                reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

                /* Check that the node id is valid, and not 63 */
                if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
                        PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
                else
                        reg_write(ohci, d->ctrlSet, 0x8000);
        } else {
                /* Wake up the dma context if necessary */
                if (!(reg_read(ohci, d->ctrlSet) & 0x400))
                        DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

                /* do this always, to avoid race condition */
                reg_write(ohci, d->ctrlSet, 0x1000);
        }

        return;
}
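
/*
 * The Z value written alongside cmdPtr above encodes how many 16-byte
 * descriptor slots the first DMA program occupies: an immediate-header
 * descriptor takes two slots (z = 2, quadlet-only packets), and a packet
 * with a payload adds one OUTPUT_LAST descriptor (z = 3).  This matches
 * z = (packet->data_size) ? 3 : 2 computed for the first queued packet.
 */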

/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
        struct ti_ohci *ohci = host->hostdata;
        struct dma_trm_ctx *d;
        unsigned long flags;

        if (packet->data_size > ohci->max_packet_size) {
                PRINT(KERN_ERR,
                      "Transmit packet size %Zd is too big",
                      packet->data_size);
                return -EOVERFLOW;
        }

        if (packet->type == hpsb_raw)
                d = &ohci->at_req_context;
        else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
                d = &ohci->at_resp_context;
        else
                d = &ohci->at_req_context;

        spin_lock_irqsave(&d->lock,flags);

        list_add_tail(&packet->driver_list, &d->pending_list);

        dma_trm_flush(ohci, d);

        spin_unlock_irqrestore(&d->lock,flags);

        return 0;
}

static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
        struct ti_ohci *ohci = host->hostdata;
        int retval = 0, phy_reg;

        switch (cmd) {
        case RESET_BUS:
                switch (arg) {
                case SHORT_RESET:
                        phy_reg = get_phy_reg(ohci, 5);
                        phy_reg |= 0x40;
                        set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
                        break;
                case LONG_RESET:
                        phy_reg = get_phy_reg(ohci, 1);
                        phy_reg |= 0x40;
                        set_phy_reg(ohci, 1, phy_reg); /* set IBR */
                        break;
                case SHORT_RESET_NO_FORCE_ROOT:
                        phy_reg = get_phy_reg(ohci, 1);
                        if (phy_reg & 0x80) {
                                phy_reg &= ~0x80;
                                set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
                        }

                        phy_reg = get_phy_reg(ohci, 5);
                        phy_reg |= 0x40;
                        set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
                        break;
                case LONG_RESET_NO_FORCE_ROOT:
                        phy_reg = get_phy_reg(ohci, 1);
                        phy_reg &= ~0x80;
                        phy_reg |= 0x40;
                        set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
                        break;
                case SHORT_RESET_FORCE_ROOT:
                        phy_reg = get_phy_reg(ohci, 1);
                        if (!(phy_reg & 0x80)) {
                                phy_reg |= 0x80;
                                set_phy_reg(ohci, 1, phy_reg); /* set RHB */
                        }

                        phy_reg = get_phy_reg(ohci, 5);
                        phy_reg |= 0x40;
                        set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
                        break;
                case LONG_RESET_FORCE_ROOT:
                        phy_reg = get_phy_reg(ohci, 1);
                        phy_reg |= 0xc0;
                        set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
                        break;
                default:
                        retval = -1;
                }
                break;

        case GET_CYCLE_COUNTER:
                retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                break;

        case SET_CYCLE_COUNTER:
                reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
                break;

        case SET_BUS_ID:
                PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
                break;

        case ACT_CYCLE_MASTER:
                if (arg) {
                        /* check if we are root and other nodes are present */
                        u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
                        if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
                                /*
                                 * enable cycleTimer, cycleMaster
                                 */
                                DBGMSG("Cycle master enabled");
                                reg_write(ohci, OHCI1394_LinkControlSet,
                                          OHCI1394_LinkControl_CycleTimerEnable |
                                          OHCI1394_LinkControl_CycleMaster);
                        }
                } else {
                        /* disable cycleTimer, cycleMaster, cycleSource */
                        reg_write(ohci, OHCI1394_LinkControlClear,
                                  OHCI1394_LinkControl_CycleTimerEnable |
                                  OHCI1394_LinkControl_CycleMaster |
                                  OHCI1394_LinkControl_CycleSource);
                }
                break;

        case CANCEL_REQUESTS:
                DBGMSG("Cancel request received");
                dma_trm_reset(&ohci->at_req_context);
                dma_trm_reset(&ohci->at_resp_context);
                break;

        default:
                PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
                        cmd);
                break;
        }
        return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
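
/*
  Example with a 64 KiB buffer and 4 KiB pages: nblocks = 15, so the
  blocks cover the first 60 KiB and the 16th page is held in reserve.
  When a packet begins near the end of block 14 and wraps into block 0,
  its head stays where DMA wrote it and the wrapped tail is copied into
  the reserve page, which immediately follows block 14 - the packet is
  then handed to the user as one contiguous range.
*/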

struct ohci_iso_recv {
        struct ti_ohci *ohci;

        struct ohci1394_iso_tasklet task;
        int task_active;

        enum { BUFFER_FILL_MODE = 0,
               PACKET_PER_BUFFER_MODE = 1 } dma_mode;

        /* memory and PCI mapping for the DMA descriptors */
        struct dma_prog_region prog;
        struct dma_cmd *block; /* = (struct dma_cmd*) prog.kvirt */

        /* how many DMA blocks fit in the buffer */
        unsigned int nblocks;

        /* stride of DMA blocks */
        unsigned int buf_stride;

        /* number of blocks to batch between interrupts */
        int block_irq_interval;

        /* block that DMA will finish next */
        int block_dma;

        /* (buffer-fill only) block that the reader will release next */
        int block_reader;

        /* (buffer-fill only) bytes of buffer the reader has released,
           less than one block */
        int released_bytes;

        /* (buffer-fill only) buffer offset at which the next packet will appear */
        int dma_offset;

        /* OHCI DMA context control registers */
        u32 ContextControlSet;
        u32 ContextControlClear;
        u32 CommandPtr;
        u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
        struct ti_ohci *ohci = iso->host->hostdata;
        struct ohci_iso_recv *recv;
        int ctx;
        int ret = -ENOMEM;

        recv = kmalloc(sizeof(*recv), GFP_KERNEL);
        if (!recv)
                return -ENOMEM;

        iso->hostdata = recv;
        recv->ohci = ohci;
        recv->task_active = 0;
        dma_prog_region_init(&recv->prog);
        recv->block = NULL;

        /* use buffer-fill mode, unless irq_interval is 1
           (note: multichannel requires buffer-fill) */

        if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
             iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
                recv->dma_mode = PACKET_PER_BUFFER_MODE;
        } else {
                recv->dma_mode = BUFFER_FILL_MODE;
        }

        /* set nblocks, buf_stride, block_irq_interval */

        if (recv->dma_mode == BUFFER_FILL_MODE) {
                recv->buf_stride = PAGE_SIZE;

                /* one block per page of data in the DMA buffer, minus the final guard page */
                recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
                if (recv->nblocks < 3) {
                        DBGMSG("ohci_iso_recv_init: DMA buffer too small");
                        goto err;
                }

                /* iso->irq_interval is in packets - translate that to blocks */
                if (iso->irq_interval == 1)
                        recv->block_irq_interval = 1;
                else
                        recv->block_irq_interval = iso->irq_interval *
                                                        ((recv->nblocks+1)/iso->buf_packets);
                if (recv->block_irq_interval*4 > recv->nblocks)
                        recv->block_irq_interval = recv->nblocks/4;
                if (recv->block_irq_interval < 1)
                        recv->block_irq_interval = 1;

        } else {
                int max_packet_size;

                recv->nblocks = iso->buf_packets;
                recv->block_irq_interval = iso->irq_interval;
                if (recv->block_irq_interval * 4 > iso->buf_packets)
                        recv->block_irq_interval = iso->buf_packets / 4;
                if (recv->block_irq_interval < 1)
                        recv->block_irq_interval = 1;

                /* choose a buffer stride */
                /* must be a power of 2, and <= PAGE_SIZE */

                max_packet_size = iso->buf_size / iso->buf_packets;

                for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
                    recv->buf_stride *= 2);

                if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
                   recv->buf_stride > PAGE_SIZE) {
                        /* this shouldn't happen, but anyway... */
                        DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
                        goto err;
                }
        }

        recv->block_reader = 0;
        recv->released_bytes = 0;
        recv->block_dma = 0;
        recv->dma_offset = 0;

        /* size of DMA program = one descriptor per block */
        if (dma_prog_region_alloc(&recv->prog,
                                 sizeof(struct dma_cmd) * recv->nblocks,
                                 recv->ohci->dev))
                goto err;

        recv->block = (struct dma_cmd*) recv->prog.kvirt;

        ohci1394_init_iso_tasklet(&recv->task,
                                  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
                                                       OHCI_ISO_RECEIVE,
                                  ohci_iso_recv_task, (unsigned long) iso);

        if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
                ret = -EBUSY;
                goto err;
        }

        recv->task_active = 1;

        /* recv context registers are spaced 32 bytes apart */
        ctx = recv->task.context;
        recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
        recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
        recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
        recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

        if (iso->channel == -1) {
                /* clear multi-channel selection mask */
                reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
                reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
        }

        /* write the DMA program */
        ohci_iso_recv_program(iso);

        DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
               " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
               recv->dma_mode == BUFFER_FILL_MODE ?
               "buffer-fill" : "packet-per-buffer",
               iso->buf_size/PAGE_SIZE, iso->buf_size,
               recv->nblocks, recv->buf_stride, recv->block_irq_interval);

        return 0;

err:
        ohci_iso_recv_shutdown(iso);
        return ret;
}

static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
        struct ohci_iso_recv *recv = iso->hostdata;

        /* disable interrupts */
        reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

        /* halt DMA */
        ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
        struct ohci_iso_recv *recv = iso->hostdata;

        if (recv->task_active) {
                ohci_iso_recv_stop(iso);
                ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
                recv->task_active = 0;
        }

        dma_prog_region_free(&recv->prog);
        kfree(recv);
        iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
        struct ohci_iso_recv *recv = iso->hostdata;
        int blk;

        /* address of 'branch' field in previous DMA descriptor */
        u32 *prev_branch = NULL;

        for (blk = 0; blk < recv->nblocks; blk++) {
                u32 control;

                /* the DMA descriptor */
                struct dma_cmd *cmd = &recv->block[blk];

                /* offset of the DMA descriptor relative to the DMA prog buffer */
                unsigned long prog_offset = blk * sizeof(struct dma_cmd);

                /* offset of this packet's data within the DMA buffer */
                unsigned long buf_offset = blk * recv->buf_stride;

                if (recv->dma_mode == BUFFER_FILL_MODE) {
                        control = 2 << 28; /* INPUT_MORE */
                } else {
                        control = 3 << 28; /* INPUT_LAST */
                }

                control |= 8 << 24; /* s = 1, update xferStatus and resCount */

                /* interrupt on last block, and at intervals */
                if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
                        control |= 3 << 20; /* want interrupt */
                }

                control |= 3 << 18; /* enable branch to address */
                control |= recv->buf_stride;

                cmd->control = cpu_to_le32(control);
                cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
                cmd->branchAddress = 0; /* filled in on next loop */
                cmd->status = cpu_to_le32(recv->buf_stride);

                /* link the previous descriptor to this one */
                if (prev_branch) {
                        *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
                }

                prev_branch = &cmd->branchAddress;
        }

        /* the final descriptor's branch address and Z should be left at 0 */
}
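
/*
 * "Gapped" here means the ring is deliberately left broken: the last
 * descriptor's branchAddress stays 0, so DMA halts when it catches up
 * with the reader instead of overwriting unreleased data.  As blocks
 * are released, ohci_iso_recv_release_block() below moves the gap
 * forward by linking the old end descriptor to the freed one and waking
 * the context.
 */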

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
        struct ohci_iso_recv *recv = iso->hostdata;
        int reg, i;

        if (channel < 32) {
                reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
                i = channel;
        } else {
                reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
                i = channel - 32;
        }

        reg_write(recv->ohci, reg, (1 << i));

        /* issue a dummy read to force all PCI writes to be posted immediately */
        mb();
        reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
        struct ohci_iso_recv *recv = iso->hostdata;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i)) {
                        if (i < 32)
                                reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
                        else
                                reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
                } else {
                        if (i < 32)
                                reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
                        else
                                reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
                }
        }

        /* issue a dummy read to force all PCI writes to be posted immediately */
        mb();
        reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
1305
1306 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1307 {
1308         struct ohci_iso_recv *recv = iso->hostdata;
1309         struct ti_ohci *ohci = recv->ohci;
1310         u32 command, contextMatch;
1311
1312         reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1313         wmb();
1314
1315         /* always keep ISO headers */
1316         command = (1 << 30);
1317
1318         if (recv->dma_mode == BUFFER_FILL_MODE)
1319                 command |= (1 << 31);
1320
1321         reg_write(recv->ohci, recv->ContextControlSet, command);
1322
1323         /* match on specified tags */
1324         contextMatch = tag_mask << 28;
1325
1326         if (iso->channel == -1) {
1327                 /* enable multichannel reception */
1328                 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1329         } else {
1330                 /* listen on channel */
1331                 contextMatch |= iso->channel;
1332         }
1333
1334         if (cycle != -1) {
1335                 u32 seconds;
1336
1337                 /* enable cycleMatch */
1338                 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1339
1340                 /* set starting cycle */
1341                 cycle &= 0x1FFF;
1342
1343                 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1344                    just snarf them from the current time */
1345                 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1346
1347                 /* advance one second to give some extra time for DMA to start */
1348                 seconds += 1;
1349
1350                 cycle |= (seconds & 3) << 13;
1351
1352                 contextMatch |= cycle << 12;
1353         }
1354
1355         if (sync != -1) {
1356                 /* set sync flag on first DMA descriptor */
1357                 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1358                 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1359
1360                 /* match sync field */
1361                 contextMatch |= (sync&0xf)<<8;
1362         }
1363
1364         reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1365
1366         /* address of first descriptor block */
1367         command = dma_prog_region_offset_to_bus(&recv->prog,
1368                                                 recv->block_dma * sizeof(struct dma_cmd));
1369         command |= 1; /* Z=1 */
1370
1371         reg_write(recv->ohci, recv->CommandPtr, command);
1372
1373         /* enable interrupts */
1374         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1375
1376         wmb();
1377
1378         /* run */
1379         reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1380
1381         /* issue a dummy read of the cycle timer register to force
1382            all PCI writes to be posted immediately */
1383         mb();
1384         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1385
1386         /* check RUN */
1387         if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1388                 PRINT(KERN_ERR,
1389                       "Error starting IR DMA (ContextControl 0x%08x)\n",
1390                       reg_read(recv->ohci, recv->ContextControlSet));
1391                 return -1;
1392         }
1393
1394         return 0;
1395 }
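
/*
 * Worked example of the cycleMatch encoding above (hypothetical
 * numbers): bits 12:0 of the match value hold the bus cycle mod 8000,
 * bits 14:13 the low two bits of cycleSeconds.  With the cycle timer
 * currently reading seconds = 5 and a requested start cycle of 7900:
 *
 *	seconds = 5 + 1;			(advanced by one second)
 *	cycle = 7900 | ((6 & 3) << 13);		(= 0x5EDC)
 *	contextMatch |= cycle << 12;
 */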
1396
1397 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1398 {
1399         /* re-use the DMA descriptor for the block by linking
1400            the previous descriptor to it */
1401
1402         int next_i = block;
1403         int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1404
1405         struct dma_cmd *next = &recv->block[next_i];
1406         struct dma_cmd *prev = &recv->block[prev_i];
1407
1408         /* ignore out-of-range requests */
1409         if (block < 0 || block >= recv->nblocks)
1410                 return;
1411
1412         /* 'next' becomes the new end of the DMA chain,
1413            so disable branch and enable interrupt */
1414         next->branchAddress = 0;
1415         next->control |= cpu_to_le32(3 << 20);
1416         next->status = cpu_to_le32(recv->buf_stride);
1417
1418         /* link prev to next */
1419         prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1420                                                                         sizeof(struct dma_cmd) * next_i)
1421                                           | 1); /* Z=1 */
1422
1423         /* disable interrupt on previous DMA descriptor, except at intervals */
1424         if ((prev_i % recv->block_irq_interval) == 0) {
1425                 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1426         } else {
1427                 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1428         }
1429         wmb();
1430
1431         /* wake up DMA in case it fell asleep */
1432         reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1433 }
1434
1435 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1436                                              struct hpsb_iso_packet_info *info)
1437 {
1438         /* release the memory where the packet was */
1439         recv->released_bytes += info->total_len;
1440
1441         /* have we released enough memory for one block? */
1442         while (recv->released_bytes > recv->buf_stride) {
1443                 ohci_iso_recv_release_block(recv, recv->block_reader);
1444                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1445                 recv->released_bytes -= recv->buf_stride;
1446         }
1447 }
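
/*
 * Worked example (hypothetical numbers): with buf_stride = 4096 and
 * packets of total_len = 1024, releasing the fifth packet pushes
 * released_bytes to 5120 > 4096, so exactly one DMA block is recycled
 * and released_bytes drops back to 1024.
 */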
1448
1449 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1450 {
1451         struct ohci_iso_recv *recv = iso->hostdata;
1452         if (recv->dma_mode == BUFFER_FILL_MODE) {
1453                 ohci_iso_recv_bufferfill_release(recv, info);
1454         } else {
1455                 ohci_iso_recv_release_block(recv, info - iso->infos);
1456         }
1457 }
1458
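/*
 * For reference, the buffer-fill packet layout assumed by the parser
 * below (OHCI 1.1, section 10.6.1.1), as a sketch:
 *
 *	quadlet 0:	byte 0 = sy (bits 3:0)
 *			byte 1 = tag (bits 7:6) | channel (bits 5:0)
 *			bytes 2-3 = dataLength, little endian
 *	payload:	dataLength bytes, padded to a quadlet boundary
 *	trailer:	one quadlet; bytes 0-1 carry the timeStamp
 *			(cycle count in bits 12:0)
 */
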
1459 /* parse all packets from blocks that have been fully received */
1460 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1461 {
1462         int wake = 0;
1463         int runaway = 0;
1464         struct ti_ohci *ohci = recv->ohci;
1465
1466         while (1) {
1467                 /* we expect the next parsable packet to begin at recv->dma_offset */
1468                 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1469
1470                 unsigned int offset;
1471                 unsigned short len, cycle, total_len;
1472                 unsigned char channel, tag, sy;
1473
1474                 unsigned char *p = iso->data_buf.kvirt;
1475
1476                 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1477
1478                 /* don't loop indefinitely */
1479                 if (runaway++ > 100000) {
1480                         atomic_inc(&iso->overflows);
1481                         PRINT(KERN_ERR,
1482                               "IR DMA error - Runaway during buffer parsing!\n");
1483                         break;
1484                 }
1485
1486                 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1487                 if (this_block == recv->block_dma)
1488                         break;
1489
1490                 wake = 1;
1491
1492                 /* parse data length, tag, channel, and sy */
1493
1494                 /* note: we keep our own local copies of 'len' and 'offset'
1495                    so the user can't mess with them by poking in the mmap area */
1496
1497                 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1498
1499                 if (len > 4096) {
1500                         PRINT(KERN_ERR,
1501                               "IR DMA error - bogus 'len' value %u\n", len);
1502                 }
1503
1504                 channel = p[recv->dma_offset+1] & 0x3F;
1505                 tag = p[recv->dma_offset+1] >> 6;
1506                 sy = p[recv->dma_offset+0] & 0xF;
1507
1508                 /* advance to data payload */
1509                 recv->dma_offset += 4;
1510
1511                 /* check for wrap-around */
1512                 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1513                         recv->dma_offset -= recv->buf_stride*recv->nblocks;
1514                 }
1515
1516                 /* dma_offset now points to the first byte of the data payload */
1517                 offset = recv->dma_offset;
1518
1519                 /* advance to xferStatus/timeStamp */
1520                 recv->dma_offset += len;
1521
1522                 total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1523                 /* payload is padded to 4 bytes */
1524                 if (len % 4) {
1525                         recv->dma_offset += 4 - (len%4);
1526                         total_len += 4 - (len%4);
1527                 }
1528
1529                 /* check for wrap-around */
1530                 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1531                         /* uh oh, the packet data wraps from the last
1532                            to the first DMA block - make the packet
1533                            contiguous by copying its "tail" into the
1534                            guard page */
1535
1536                         int guard_off = recv->buf_stride*recv->nblocks;
1537                         int tail_len = len - (guard_off - offset);
1538
1539                         if (tail_len > 0 && tail_len < recv->buf_stride) {
1540                                 memcpy(iso->data_buf.kvirt + guard_off,
1541                                        iso->data_buf.kvirt,
1542                                        tail_len);
1543                         }
1544
1545                         recv->dma_offset -= recv->buf_stride*recv->nblocks;
1546                 }
1547
1548                 /* parse timestamp */
1549                 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1550                 cycle &= 0x1FFF;
1551
1552                 /* advance to next packet */
1553                 recv->dma_offset += 4;
1554
1555                 /* check for wrap-around */
1556                 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1557                         recv->dma_offset -= recv->buf_stride*recv->nblocks;
1558                 }
1559
1560                 hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
1561         }
1562
1563         if (wake)
1564                 hpsb_iso_wake(iso);
1565 }
1566
1567 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1568 {
1569         int loop;
1570         struct ti_ohci *ohci = recv->ohci;
1571
1572         /* loop over all blocks */
1573         for (loop = 0; loop < recv->nblocks; loop++) {
1574
1575                 /* check block_dma to see if it's done */
1576                 struct dma_cmd *im = &recv->block[recv->block_dma];
1577
1578                 /* check the DMA descriptor for new writes to xferStatus */
1579                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1580
1581                 /* rescount is the number of bytes *remaining to be written* in the block */
1582                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1583
1584                 unsigned char event = xferstatus & 0x1F;
1585
1586                 if (!event) {
1587                         /* nothing has happened to this block yet */
1588                         break;
1589                 }
1590
1591                 if (event != 0x11) {
1592                         atomic_inc(&iso->overflows);
1593                         PRINT(KERN_ERR,
1594                               "IR DMA error - OHCI error code 0x%02x\n", event);
1595                 }
1596
1597                 if (rescount != 0) {
1598                         /* the card is still writing to this block;
1599                            we can't touch it until it's done */
1600                         break;
1601                 }
1602
1603                 /* OK, the block is finished... */
1604
1605                 /* sync our view of the block */
1606                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1607
1608                 /* reset the DMA descriptor */
1609                 im->status = cpu_to_le32(recv->buf_stride);
1610
1611                 /* advance block_dma */
1612                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1613
1614                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1615                         atomic_inc(&iso->overflows);
1616                         DBGMSG("ISO reception overflow - "
1617                                "ran out of DMA blocks");
1618                 }
1619         }
1620
1621         /* parse any packets that have arrived */
1622         ohci_iso_recv_bufferfill_parse(iso, recv);
1623 }
1624
1625 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1626 {
1627         int count;
1628         int wake = 0;
1629         struct ti_ohci *ohci = recv->ohci;
1630
1631         /* loop over the entire buffer */
1632         for (count = 0; count < recv->nblocks; count++) {
1633                 u32 packet_len = 0;
1634
1635                 /* pointer to the DMA descriptor */
1636                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1637
1638                 /* check the DMA descriptor for new writes to xferStatus */
1639                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1640                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1641
1642                 unsigned char event = xferstatus & 0x1F;
1643
1644                 if (!event) {
1645                         /* this packet hasn't come in yet; we are done for now */
1646                         goto out;
1647                 }
1648
1649                 if (event == 0x11) {
1650                         /* packet received successfully! */
1651
1652                         /* rescount is the number of bytes *remaining* in the packet buffer,
1653                            after the packet was written */
1654                         packet_len = recv->buf_stride - rescount;
1655
1656                 } else if (event == 0x02) {
1657                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1658                 } else {
1659                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1660                 }
1661
1662                 /* sync our view of the buffer */
1663                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1664
1665                 /* record the per-packet info */
1666                 {
1667                         /* iso header is 8 bytes ahead of the data payload */
1668                         unsigned char *hdr;
1669
1670                         unsigned int offset;
1671                         unsigned short cycle;
1672                         unsigned char channel, tag, sy;
1673
1674                         offset = iso->pkt_dma * recv->buf_stride;
1675                         hdr = iso->data_buf.kvirt + offset;
1676
1677                         /* skip iso header */
1678                         offset += 8;
1679                         packet_len -= 8;
1680
1681                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1682                         channel = hdr[5] & 0x3F;
1683                         tag = hdr[5] >> 6;
1684                         sy = hdr[4] & 0xF;
1685
1686                         hpsb_iso_packet_received(iso, offset, packet_len,
1687                                         recv->buf_stride, cycle, channel, tag, sy);
1688                 }
1689
1690                 /* reset the DMA descriptor */
1691                 il->status = cpu_to_le32(recv->buf_stride);
1692
1693                 wake = 1;
1694                 recv->block_dma = iso->pkt_dma;
1695         }
1696
1697 out:
1698         if (wake)
1699                 hpsb_iso_wake(iso);
1700 }
1701
1702 static void ohci_iso_recv_task(unsigned long data)
1703 {
1704         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1705         struct ohci_iso_recv *recv = iso->hostdata;
1706
1707         if (recv->dma_mode == BUFFER_FILL_MODE)
1708                 ohci_iso_recv_bufferfill_task(iso, recv);
1709         else
1710                 ohci_iso_recv_packetperbuf_task(iso, recv);
1711 }
1712
1713 /***********************************
1714  * rawiso ISO transmission         *
1715  ***********************************/
1716
1717 struct ohci_iso_xmit {
1718         struct ti_ohci *ohci;
1719         struct dma_prog_region prog;
1720         struct ohci1394_iso_tasklet task;
1721         int task_active;
1722         int last_cycle;
1723         atomic_t skips;
1724
1725         u32 ContextControlSet;
1726         u32 ContextControlClear;
1727         u32 CommandPtr;
1728 };
1729
1730 /* transmission DMA program:
1731    one OUTPUT_MORE_IMMEDIATE for the IT header
1732    one OUTPUT_LAST for the buffer data */
1733
1734 struct iso_xmit_cmd {
1735         struct dma_cmd output_more_immediate;
1736         u8 iso_hdr[8];
1737         u32 unused[2];
1738         struct dma_cmd output_last;
1739 };
1740
1741 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1742 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1743 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1744 static void ohci_iso_xmit_task(unsigned long data);
1745
1746 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1747 {
1748         struct ohci_iso_xmit *xmit;
1749         unsigned int prog_size;
1750         int ctx;
1751         int ret = -ENOMEM;
1752
1753         xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
1754         if (!xmit)
1755                 return -ENOMEM;
1756
1757         iso->hostdata = xmit;
1758         xmit->ohci = iso->host->hostdata;
1759         xmit->task_active = 0;
1760         xmit->last_cycle = -1;
1761         atomic_set(&iso->skips, 0);
1762
1763         dma_prog_region_init(&xmit->prog);
1764
1765         prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1766
1767         if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1768                 goto err;
1769
1770         ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1771                                   ohci_iso_xmit_task, (unsigned long) iso);
1772
1773         if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1774                 ret = -EBUSY;
1775                 goto err;
1776         }
1777
1778         xmit->task_active = 1;
1779
1780         /* xmit context registers are spaced 16 bytes apart */
1781         ctx = xmit->task.context;
1782         xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1783         xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1784         xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1785
1786         return 0;
1787
1788 err:
1789         ohci_iso_xmit_shutdown(iso);
1790         return ret;
1791 }
1792
1793 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1794 {
1795         struct ohci_iso_xmit *xmit = iso->hostdata;
1796         struct ti_ohci *ohci = xmit->ohci;
1797
1798         /* disable interrupts */
1799         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1800
1801         /* halt DMA */
1802         if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1803                 /* XXX the DMA context will lock up if you try to send too much data! */
1804                 PRINT(KERN_ERR,
1805                       "you probably exceeded the OHCI card's bandwidth limit - "
1806                       "reload the module and reduce xmit bandwidth");
1807         }
1808 }
1809
1810 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1811 {
1812         struct ohci_iso_xmit *xmit = iso->hostdata;
1813
1814         if (xmit->task_active) {
1815                 ohci_iso_xmit_stop(iso);
1816                 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1817                 xmit->task_active = 0;
1818         }
1819
1820         dma_prog_region_free(&xmit->prog);
1821         kfree(xmit);
1822         iso->hostdata = NULL;
1823 }
1824
1825 static void ohci_iso_xmit_task(unsigned long data)
1826 {
1827         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1828         struct ohci_iso_xmit *xmit = iso->hostdata;
1829         struct ti_ohci *ohci = xmit->ohci;
1830         int wake = 0;
1831         int count;
1832
1833         /* check the whole buffer if necessary, starting at pkt_dma */
1834         for (count = 0; count < iso->buf_packets; count++) {
1835                 int cycle;
1836
1837                 /* DMA descriptor */
1838                 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1839
1840                 /* check for new writes to xferStatus */
1841                 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1842                 u8  event = xferstatus & 0x1F;
1843
1844                 if (!event) {
1845                         /* packet hasn't been sent yet; we are done for now */
1846                         break;
1847                 }
1848
1849                 if (event != 0x11)
1850                         PRINT(KERN_ERR,
1851                               "IT DMA error - OHCI error code 0x%02x\n", event);
1852
1853                 /* at least one packet went out, so wake up the writer */
1854                 wake = 1;
1855
1856                 /* parse cycle */
1857                 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1858
1859                 if (xmit->last_cycle > -1) {
1860                         int cycle_diff = cycle - xmit->last_cycle;
1861                         int skip;
1862
1863                         /* unwrap */
1864                         if (cycle_diff < 0) {
1865                                 cycle_diff += 8000;
1866                                 if (cycle_diff < 0)
1867                                         PRINT(KERN_ERR, "bogus cycle diff %d\n",
1868                                               cycle_diff);
1869                         }
1870
1871                         skip = cycle_diff - 1;
1872                         if (skip > 0) {
1873                                 DBGMSG("skipped %d cycles without packet loss", skip);
1874                                 atomic_add(skip, &iso->skips);
1875                         }
1876                 }
1877                 xmit->last_cycle = cycle;
1878
1879                 /* tell the subsystem the packet has gone out */
1880                 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1881
1882                 /* reset the DMA descriptor for next time */
1883                 cmd->output_last.status = 0;
1884         }
1885
1886         if (wake)
1887                 hpsb_iso_wake(iso);
1888 }
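
/*
 * Unwrap example (hypothetical numbers): cycles count mod 8000, so
 * last_cycle = 7998 followed by cycle = 2 gives
 * cycle_diff = 2 - 7998 + 8000 = 4, i.e. skip = 3 cycles went by
 * without a packet being sent.
 */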
1889
1890 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1891 {
1892         struct ohci_iso_xmit *xmit = iso->hostdata;
1893         struct ti_ohci *ohci = xmit->ohci;
1894
1895         int next_i, prev_i;
1896         struct iso_xmit_cmd *next, *prev;
1897
1898         unsigned int offset;
1899         unsigned short len;
1900         unsigned char tag, sy;
1901
1902         /* check that the packet doesn't cross a page boundary
1903            (we could allow this if we added OUTPUT_MORE descriptor support) */
1904         if (cross_bound(info->offset, info->len)) {
1905                 PRINT(KERN_ERR,
1906                       "rawiso xmit: packet %u crosses a page boundary",
1907                       iso->first_packet);
1908                 return -EINVAL;
1909         }
1910
1911         offset = info->offset;
1912         len = info->len;
1913         tag = info->tag;
1914         sy = info->sy;
1915
1916         /* sync up the card's view of the buffer */
1917         dma_region_sync_for_device(&iso->data_buf, offset, len);
1918
1919         /* append first_packet to the DMA chain */
1920         /* by linking the previous descriptor to it */
1921         /* (next will become the new end of the DMA chain) */
1922
1923         next_i = iso->first_packet;
1924         prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1925
1926         next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1927         prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1928
1929         /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1930         memset(next, 0, sizeof(struct iso_xmit_cmd));
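	/* the control value 0x02000008 below decodes, per the OHCI
	   descriptor format, to OUTPUT_MORE-Immediate (key = 2 in bits
	   26:24) with a reqCount of 8, i.e. the embedded iso_hdr[] */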
1931         next->output_more_immediate.control = cpu_to_le32(0x02000008);
1932
1933         /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
1934
1935         /* tcode = 0xA, and sy */
1936         next->iso_hdr[0] = 0xA0 | (sy & 0xF);
1937
1938         /* tag and channel number */
1939         next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
1940
1941         /* transmission speed */
1942         next->iso_hdr[2] = iso->speed & 0x7;
1943
1944         /* payload size */
1945         next->iso_hdr[6] = len & 0xFF;
1946         next->iso_hdr[7] = len >> 8;
1947
1948         /* set up the OUTPUT_LAST */
1949         next->output_last.control = cpu_to_le32(1 << 28);
1950         next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
1951         next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
1952         next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
1953         next->output_last.control |= cpu_to_le32(len);
1954
1955         /* payload bus address */
1956         next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
1957
1958         /* leave branchAddress at zero for now */
1959
1960         /* re-write the previous DMA descriptor to chain to this one */
1961
1962         /* set prev branch address to point to next (Z=3) */
1963         prev->output_last.branchAddress = cpu_to_le32(
1964                 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
1965
1966         /*
1967          * Link the skip address to this descriptor itself.  This causes a
1968          * context to skip a cycle whenever lost cycles or FIFO overruns
1969          * occur, without dropping the data at that point.  The application
1970          * should then decide whether this is an error condition or not.
1971          * Some protocols can deal with this by dropping rate-matching padding packets.
1972          */
1973         next->output_more_immediate.branchAddress =
1974                         prev->output_last.branchAddress;
1975
1976         /* disable interrupt, unless required by the IRQ interval */
1977         if (prev_i % iso->irq_interval) {
1978                 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
1979         } else {
1980                 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
1981         }
1982
1983         wmb();
1984
1985         /* wake DMA in case it is sleeping */
1986         reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
1987
1988         /* issue a dummy read of the cycle timer to force all PCI
1989            writes to be posted immediately */
1990         mb();
1991         reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
1992
1993         return 0;
1994 }
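
/*
 * The IT header bytes built above, sketched for reference: with
 * channel 5, tag 1, sy 0 and a 512-byte payload, the eight bytes of
 * iso_hdr[] come out as
 *
 *	{ 0xA0, 0x45, speed, 0, 0, 0, 0x00, 0x02 }
 *
 * i.e. tcode 0xA and sy in byte 0, tag and channel in byte 1, the
 * speed code in byte 2, and dataLength little-endian in bytes 6-7.
 */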
1995
1996 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
1997 {
1998         struct ohci_iso_xmit *xmit = iso->hostdata;
1999         struct ti_ohci *ohci = xmit->ohci;
2000
2001         /* clear out the control register */
2002         reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2003         wmb();
2004
2005         /* address and length of first descriptor block (Z=3) */
2006         reg_write(xmit->ohci, xmit->CommandPtr,
2007                   dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2008
2009         /* cycle match */
2010         if (cycle != -1) {
2011                 u32 start = cycle & 0x1FFF;
2012
2013                 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2014                    just snarf them from the current time */
2015                 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2016
2017                 /* advance one second to give some extra time for DMA to start */
2018                 seconds += 1;
2019
2020                 start |= (seconds & 3) << 13;
2021
2022                 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2023         }
2024
2025         /* enable interrupts */
2026         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2027
2028         /* run */
2029         reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2030         mb();
2031
2032         /* wait 100 usec to give the card time to go active */
2033         udelay(100);
2034
2035         /* check the RUN bit */
2036         if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2037                 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2038                       reg_read(xmit->ohci, xmit->ContextControlSet));
2039                 return -1;
2040         }
2041
2042         return 0;
2043 }
2044
2045 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2046 {
2047
2048         switch (cmd) {
2049         case XMIT_INIT:
2050                 return ohci_iso_xmit_init(iso);
2051         case XMIT_START:
2052                 return ohci_iso_xmit_start(iso, arg);
2053         case XMIT_STOP:
2054                 ohci_iso_xmit_stop(iso);
2055                 return 0;
2056         case XMIT_QUEUE:
2057                 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2058         case XMIT_SHUTDOWN:
2059                 ohci_iso_xmit_shutdown(iso);
2060                 return 0;
2061
2062         case RECV_INIT:
2063                 return ohci_iso_recv_init(iso);
2064         case RECV_START: {
2065                 int *args = (int*) arg;
2066                 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2067         }
2068         case RECV_STOP:
2069                 ohci_iso_recv_stop(iso);
2070                 return 0;
2071         case RECV_RELEASE:
2072                 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2073                 return 0;
2074         case RECV_FLUSH:
2075                 ohci_iso_recv_task((unsigned long) iso);
2076                 return 0;
2077         case RECV_SHUTDOWN:
2078                 ohci_iso_recv_shutdown(iso);
2079                 return 0;
2080         case RECV_LISTEN_CHANNEL:
2081                 ohci_iso_recv_change_channel(iso, arg, 1);
2082                 return 0;
2083         case RECV_UNLISTEN_CHANNEL:
2084                 ohci_iso_recv_change_channel(iso, arg, 0);
2085                 return 0;
2086         case RECV_SET_CHANNEL_MASK:
2087                 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2088                 return 0;
2089
2090         default:
2091                 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2092                         cmd);
2093                 break;
2094         }
2095         return -EINVAL;
2096 }
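
/*
 * Hypothetical caller sketch: for RECV_START, 'arg' points at three
 * ints unpacked above as { cycle, tag_mask, sync }, e.g.
 *
 *	int args[3] = { -1, 0xF, -1 };	(start ASAP, all tags, no sync)
 *	ohci_isoctl(iso, RECV_START, (unsigned long) args);
 */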
2097
2098 /***************************************
2099  * IEEE-1394 functionality section END *
2100  ***************************************/
2101
2102
2103 /********************************************************
2104  * Global stuff (interrupt handler, init/shutdown code) *
2105  ********************************************************/
2106
2107 static void dma_trm_reset(struct dma_trm_ctx *d)
2108 {
2109         unsigned long flags;
2110         LIST_HEAD(packet_list);
2111         struct ti_ohci *ohci = d->ohci;
2112         struct hpsb_packet *packet, *ptmp;
2113
2114         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2115
2116         /* Lock the context, reset it and release it. Move the packets
2117          * that were pending in the context to packet_list and free
2118          * them after releasing the lock. */
2119
2120         spin_lock_irqsave(&d->lock, flags);
2121
2122         list_splice_init(&d->fifo_list, &packet_list);
2123         list_splice_init(&d->pending_list, &packet_list);
2124
2125         d->branchAddrPtr = NULL;
2126         d->sent_ind = d->prg_ind;
2127         d->free_prgs = d->num_desc;
2128
2129         spin_unlock_irqrestore(&d->lock, flags);
2130
2131         if (list_empty(&packet_list))
2132                 return;
2133
2134         PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2135
2136         /* Now process subsystem callbacks for the packets from this
2137          * context. */
2138         list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2139                 list_del_init(&packet->driver_list);
2140                 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2141         }
2142 }
2143
2144 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2145                                        quadlet_t rx_event,
2146                                        quadlet_t tx_event)
2147 {
2148         struct ohci1394_iso_tasklet *t;
2149         unsigned long mask;
2150         unsigned long flags;
2151
2152         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2153
2154         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2155                 mask = 1 << t->context;
2156
2157                 if (t->type == OHCI_ISO_TRANSMIT) {
2158                         if (tx_event & mask)
2159                                 tasklet_schedule(&t->tasklet);
2160                 } else {
2161                         /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
2162                         if (rx_event & mask)
2163                                 tasklet_schedule(&t->tasklet);
2164                 }
2165         }
2166
2167         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2168 }
2169
2170 static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2171 {
2172         quadlet_t event, node_id;
2173         struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2174         struct hpsb_host *host = ohci->host;
2175         int phyid = -1, isroot = 0;
2176         unsigned long flags;
2177
2178         /* Read and clear the interrupt event register.  Don't clear
2179          * the busReset event, though. This is done when we get the
2180          * selfIDComplete interrupt. */
2181         spin_lock_irqsave(&ohci->event_lock, flags);
2182         event = reg_read(ohci, OHCI1394_IntEventClear);
2183         reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2184         spin_unlock_irqrestore(&ohci->event_lock, flags);
2185
2186         if (!event)
2187                 return IRQ_NONE;
2188
2189         /* If event is ~(u32)0, the CardBus card was ejected.  In this
2190          * case we just return, and clean up in the ohci1394_pci_remove
2191          * function. */
2192         if (event == ~(u32) 0) {
2193                 DBGMSG("Device removed.");
2194                 return IRQ_NONE;
2195         }
2196
2197         DBGMSG("IntEvent: %08x", event);
2198
2199         if (event & OHCI1394_unrecoverableError) {
2200                 int ctx;
2201                 PRINT(KERN_ERR, "Unrecoverable error!");
2202
2203                 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2204                         PRINT(KERN_ERR, "Async Req Tx Context died: "
2205                                 "ctrl[%08x] cmdptr[%08x]",
2206                                 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2207                                 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2208
2209                 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2210                         PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2211                                 "ctrl[%08x] cmdptr[%08x]",
2212                                 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2213                                 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2214
2215                 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2216                         PRINT(KERN_ERR, "Async Req Rcv Context died: "
2217                                 "ctrl[%08x] cmdptr[%08x]",
2218                                 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2219                                 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2220
2221                 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2222                         PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2223                                 "ctrl[%08x] cmdptr[%08x]",
2224                                 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2225                                 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2226
2227                 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2228                         if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2229                                 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2230                                         "ctrl[%08x] cmdptr[%08x]", ctx,
2231                                         reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2232                                         reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2233                 }
2234
2235                 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2236                         if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2237                                 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2238                                         "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2239                                         reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2240                                         reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2241                                         reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2242                 }
2243
2244                 event &= ~OHCI1394_unrecoverableError;
2245         }
2246         if (event & OHCI1394_postedWriteErr) {
2247                 PRINT(KERN_ERR, "physical posted write error");
2248                 /* no recovery strategy yet; it would have to involve protocol drivers */
2249                 event &= ~OHCI1394_postedWriteErr;
2250         }
2251         if (event & OHCI1394_cycleTooLong) {
2252                 if (printk_ratelimit())
2253                         PRINT(KERN_WARNING, "isochronous cycle too long");
2254                 else
2255                         DBGMSG("OHCI1394_cycleTooLong");
2256                 reg_write(ohci, OHCI1394_LinkControlSet,
2257                           OHCI1394_LinkControl_CycleMaster);
2258                 event &= ~OHCI1394_cycleTooLong;
2259         }
2260         if (event & OHCI1394_cycleInconsistent) {
2261                 /* We subscribe to the cycleInconsistent event only to
2262                  * clear the corresponding event bit... otherwise,
2263                  * isochronous cycleMatch DMA won't work. */
2264                 DBGMSG("OHCI1394_cycleInconsistent");
2265                 event &= ~OHCI1394_cycleInconsistent;
2266         }
2267         if (event & OHCI1394_busReset) {
2268                 /* The busReset event bit can't be cleared during the
2269                  * selfID phase, so we disable busReset interrupts, to
2270                  * avoid burying the cpu in interrupt requests. */
2271                 spin_lock_irqsave(&ohci->event_lock, flags);
2272                 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2273
2274                 if (ohci->check_busreset) {
2275                         int loop_count = 0;
2276
2277                         udelay(10);
2278
2279                         while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2280                                 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2281
2282                                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2283                                 udelay(10);
2284                                 spin_lock_irqsave(&ohci->event_lock, flags);
2285
2286                                 /* The loop counter check is to prevent the driver
2287                                  * from remaining in this state forever. On the
2288                                  * initial bus reset the loop would otherwise run
2289                                  * forever and hang the system until some device is
2290                                  * manually plugged into or out of a port! The forced
2291                                  * reset seems to solve this. It mainly affects nForce2. */
2292                                 if (loop_count > 10000) {
2293                                         ohci_devctl(host, RESET_BUS, LONG_RESET);
2294                                         DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2295                                         loop_count = 0;
2296                                 }
2297
2298                                 loop_count++;
2299                         }
2300                 }
2301                 spin_unlock_irqrestore(&ohci->event_lock, flags);
2302                 if (!host->in_bus_reset) {
2303                         DBGMSG("irq_handler: Bus reset requested");
2304
2305                         /* Subsystem call */
2306                         hpsb_bus_reset(ohci->host);
2307                 }
2308                 event &= ~OHCI1394_busReset;
2309         }
2310         if (event & OHCI1394_reqTxComplete) {
2311                 struct dma_trm_ctx *d = &ohci->at_req_context;
2312                 DBGMSG("Got reqTxComplete interrupt "
2313                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2314                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2315                         ohci1394_stop_context(ohci, d->ctrlClear,
2316                                               "reqTxComplete");
2317                 else
2318                         dma_trm_tasklet((unsigned long)d);
2319                         /* rather than tasklet_schedule(&d->task) */
2320                 event &= ~OHCI1394_reqTxComplete;
2321         }
2322         if (event & OHCI1394_respTxComplete) {
2323                 struct dma_trm_ctx *d = &ohci->at_resp_context;
2324                 DBGMSG("Got respTxComplete interrupt "
2325                        "status=0x%08X", reg_read(ohci, d->ctrlSet));
2326                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2327                         ohci1394_stop_context(ohci, d->ctrlClear,
2328                                               "respTxComplete");
2329                 else
2330                         tasklet_schedule(&d->task);
2331                 event &= ~OHCI1394_respTxComplete;
2332         }
2333         if (event & OHCI1394_RQPkt) {
2334                 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2335                 DBGMSG("Got RQPkt interrupt status=0x%08X",
2336                        reg_read(ohci, d->ctrlSet));
2337                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2338                         ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2339                 else
2340                         tasklet_schedule(&d->task);
2341                 event &= ~OHCI1394_RQPkt;
2342         }
2343         if (event & OHCI1394_RSPkt) {
2344                 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2345                 DBGMSG("Got RSPkt interrupt status=0x%08X",
2346                        reg_read(ohci, d->ctrlSet));
2347                 if (reg_read(ohci, d->ctrlSet) & 0x800)
2348                         ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2349                 else
2350                         tasklet_schedule(&d->task);
2351                 event &= ~OHCI1394_RSPkt;
2352         }
2353         if (event & OHCI1394_isochRx) {
2354                 quadlet_t rx_event;
2355
2356                 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2357                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2358                 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2359                 event &= ~OHCI1394_isochRx;
2360         }
2361         if (event & OHCI1394_isochTx) {
2362                 quadlet_t tx_event;
2363
2364                 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2365                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2366                 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2367                 event &= ~OHCI1394_isochTx;
2368         }
2369         if (event & OHCI1394_selfIDComplete) {
2370                 if (host->in_bus_reset) {
2371                         node_id = reg_read(ohci, OHCI1394_NodeID);
2372
2373                         if (!(node_id & 0x80000000)) {
2374                                 PRINT(KERN_ERR,
2375                                       "SelfID received, but NodeID invalid "
2376                                       "(probably new bus reset occurred): %08X",
2377                                       node_id);
2378                                 goto selfid_not_valid;
2379                         }
2380
2381                         phyid =  node_id & 0x0000003f;
2382                         isroot = (node_id & 0x40000000) != 0;
2383
2384                         DBGMSG("SelfID interrupt received "
2385                               "(phyid %d, %s)", phyid,
2386                               (isroot ? "root" : "not root"));
2387
2388                         handle_selfid(ohci, host, phyid, isroot);
2389
2390                         /* Clear the bus reset event and re-enable the
2391                          * busReset interrupt.  */
2392                         spin_lock_irqsave(&ohci->event_lock, flags);
2393                         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2394                         reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2395                         spin_unlock_irqrestore(&ohci->event_lock, flags);
2396
2397                         /* Turn on phys dma reception.
2398                          *
2399                          * TODO: Enable some sort of filtering management.
2400                          */
2401                         if (phys_dma) {
2402                                 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2403                                           0xffffffff);
2404                                 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2405                                           0xffffffff);
2406                         }
2407
2408                         DBGMSG("PhyReqFilter=%08x%08x",
2409                                reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2410                                reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2411
2412                         hpsb_selfid_complete(host, phyid, isroot);
2413                 } else
2414                         PRINT(KERN_ERR,
2415                               "SelfID received outside of bus reset sequence");
2416
2417 selfid_not_valid:
2418                 event &= ~OHCI1394_selfIDComplete;
2419         }
2420
2421         /* Make sure we handle everything, just in case we accidentally
2422          * enabled an interrupt that we didn't write a handler for.  */
2423         if (event)
2424                 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2425                       event);
2426
2427         return IRQ_HANDLED;
2428 }
2429
2430 /* Put the buffer back into the dma context */
2431 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2432 {
2433         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2434         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2435
2436         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2437         d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2438         idx = (idx + d->num_desc - 1) % d->num_desc;
2439         d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2440
2441         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2442          * context program descriptors before it sees the wakeup bit set. */
2443         wmb();
2444
2445         /* wake up the dma context if necessary */
2446         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2447                 PRINT(KERN_INFO,
2448                       "Waking dma ctx=%d ... processing is probably too slow",
2449                       d->ctx);
2450         }
2451
2452         /* do this always, to avoid race condition */
2453         reg_write(ohci, d->ctrlSet, 0x1000);
2454 }
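
/*
 * A note on the branchAddress writes above: the low nibble of
 * branchAddress holds Z, the number of descriptors at the branch
 * target.  Masking with 0xfffffff0 sets Z = 0 (end of program) on the
 * re-inserted descriptor; OR-ing in 0x00000001 re-links the previous
 * descriptor to it with Z = 1.
 */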
2455
2456 #define cond_le32_to_cpu(data, noswap) \
2457         ((noswap) ? (data) : le32_to_cpu(data))
2458
2459 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2460                                    -1, 0, -1, 0, -1, -1, 16, -1};
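
/*
 * Sketch of the table above: nonzero entries are the fixed total size
 * of a packet with that tcode, header plus the 4-byte status trailer
 * the controller appends; 0 marks block packets whose size must be
 * read from the data_length field (packet_length() below then adds 20
 * for header and trailer); -1 marks tcodes that cannot appear here.
 */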
2461
2462 /*
2463  * Determine the length of a packet in the buffer
2464  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2465  */
2466 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2467                                 quadlet_t *buf_ptr, int offset,
2468                                 unsigned char tcode, int noswap)
2469 {
2470         int length = -1;
2471
2472         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2473                 length = TCODE_SIZE[tcode];
2474                 if (length == 0) {
2475                         if (offset + 12 >= d->buf_size) {
2476                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2477                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2478                         } else {
2479                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2480                         }
2481                         length += 20;
2482                 }
2483         } else if (d->type == DMA_CTX_ISO) {
2484                 /* Assumption: buffer fill mode with header/trailer */
2485                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2486         }
2487
2488         if (length > 0 && length % 4)
2489                 length += 4 - (length % 4);
2490
2491         return length;
2492 }
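
/*
 * Worked example for the wrap-around case in packet_length() above:
 * if only 8 bytes of a 16-byte header fit at the end of the current
 * buffer (quadlets 0 and 1), then (d->buf_size - offset) >> 2 == 2
 * and quadlet 3, whose high 16 bits hold data_length, is found at
 * index 3 - 2 = 1 of the next buffer.
 */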
2493
2494 /* Tasklet that processes dma receive buffers */
2495 static void dma_rcv_tasklet (unsigned long data)
2496 {
2497         struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2498         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2499         unsigned int split_left, idx, offset, rescount;
2500         unsigned char tcode;
2501         int length, bytes_left, ack;
2502         unsigned long flags;
2503         quadlet_t *buf_ptr;
2504         char *split_ptr;
2505         char msg[256];
2506
2507         spin_lock_irqsave(&d->lock, flags);
2508
2509         idx = d->buf_ind;
2510         offset = d->buf_offset;
2511         buf_ptr = d->buf_cpu[idx] + offset/4;
2512
2513         rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2514         bytes_left = d->buf_size - rescount - offset;
2515
2516         while (bytes_left > 0) {
2517                 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2518
2519                 /* packet_length() will return < 4 for an error */
2520                 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2521
2522                 if (length < 4) { /* something is wrong */
2523                         sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2524                                 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2525                                 d->ctx, length);
2526                         ohci1394_stop_context(ohci, d->ctrlClear, msg);
2527                         spin_unlock_irqrestore(&d->lock, flags);
2528                         return;
2529                 }
2530
2531                 /* The first case is where we have a packet that crosses
2532                  * over more than one descriptor. The next case is where
2533                  * it's all in the first descriptor.  */
2534                 if ((offset + length) > d->buf_size) {
2535                         DBGMSG("Split packet rcv'd");
2536                         if (length > d->split_buf_size) {
2537                                 ohci1394_stop_context(ohci, d->ctrlClear,
2538                                              "Split packet size exceeded");
2539                                 d->buf_ind = idx;
2540                                 d->buf_offset = offset;
2541                                 spin_unlock_irqrestore(&d->lock, flags);
2542                                 return;
2543                         }
2544
2545                         if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2546                             == d->buf_size) {
2547                                 /* Other part of packet not written yet.
2548                                  * This should never happen; in any case,
2549                                  * we'll get it on the next call.  */
2550                                 PRINT(KERN_INFO,
2551                                       "Got only half a packet!");
2552                                 d->buf_ind = idx;
2553                                 d->buf_offset = offset;
2554                                 spin_unlock_irqrestore(&d->lock, flags);
2555                                 return;
2556                         }
2557
2558                         split_left = length;
2559                         split_ptr = (char *)d->spb;
2560                         memcpy(split_ptr, buf_ptr, d->buf_size - offset);
2561                         split_left -= d->buf_size - offset;
2562                         split_ptr += d->buf_size - offset;
2563                         insert_dma_buffer(d, idx);
2564                         idx = (idx + 1) % d->num_desc;
2565                         buf_ptr = d->buf_cpu[idx];
2566                         offset = 0;
2567
2568                         while (split_left >= d->buf_size) {
2569                                 memcpy(split_ptr, buf_ptr, d->buf_size);
2570                                 split_ptr += d->buf_size;
2571                                 split_left -= d->buf_size;
2572                                 insert_dma_buffer(d, idx);
2573                                 idx = (idx + 1) % d->num_desc;
2574                                 buf_ptr = d->buf_cpu[idx];
2575                         }
2576
2577                         if (split_left > 0) {
2578                                 memcpy(split_ptr, buf_ptr, split_left);
2579                                 offset = split_left;
2580                                 buf_ptr += offset/4;
2581                         }
2582                 } else {
2583                         DBGMSG("Single packet rcv'd");
2584                         memcpy(d->spb, buf_ptr, length);
2585                         offset += length;
2586                         buf_ptr += length/4;
2587                         if (offset == d->buf_size) {
2588                                 insert_dma_buffer(d, idx);
2589                                 idx = (idx + 1) % d->num_desc;
2590                                 buf_ptr = d->buf_cpu[idx];
2591                                 offset = 0;
2592                         }
2593                 }
2594
2595                 /* One phy packet is delivered to the async receive context
2596                  * on each bus reset. We always ignore it.  */
2597                 if (tcode != OHCI1394_TCODE_PHY) {
2598                         if (!ohci->no_swap_incoming)
2599                                 header_le32_to_cpu(d->spb, tcode);
2600                         DBGMSG("Packet received from node"
2601                                 " %d ack=0x%02X spd=%d tcode=0x%X"
2602                                 " length=%d ctx=%d tlabel=%d",
2603                                 (d->spb[1]>>16)&0x3f,
2604                                 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2605                                 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2606                                 tcode, length, d->ctx,
2607                                 (d->spb[0]>>10)&0x3f);
2608
2609                         ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2610                                 == 0x11) ? 1 : 0;
2611
2612                         hpsb_packet_received(ohci->host, d->spb,
2613                                              length-4, ack);
2614                 }
2615 #ifdef OHCI1394_DEBUG
2616                 else
2617                         PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2618                                d->ctx);
2619 #endif
2620
2621                 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2622
2623                 bytes_left = d->buf_size - rescount - offset;
2624
2625         }
2626
2627         d->buf_ind = idx;
2628         d->buf_offset = offset;
2629
2630         spin_unlock_irqrestore(&d->lock, flags);
2631 }
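
/*
 * Split-packet sketch (hypothetical numbers): a 96-byte packet
 * arriving at offset = d->buf_size - 32 leaves its first 32 bytes in
 * the current buffer; those are copied into d->spb, the buffer is
 * recycled via insert_dma_buffer(), and the remaining 64 bytes are
 * copied from the start of the next buffer, leaving offset = 64.
 */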
2632
2633 /* Bottom half that processes sent packets */
2634 static void dma_trm_tasklet (unsigned long data)
2635 {
2636         struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2637         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2638         struct hpsb_packet *packet, *ptmp;
2639         unsigned long flags;
2640         u32 status, ack;
2641         size_t datasize;
2642
2643         spin_lock_irqsave(&d->lock, flags);
2644
2645         list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2646                 datasize = packet->data_size;
2647                 if (datasize && packet->type != hpsb_raw)
2648                         status = le32_to_cpu(
2649                                 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2650                 else
2651                         status = le32_to_cpu(
2652                                 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2653
2654                 if (status == 0)
2655                         /* this packet hasn't been sent yet*/
2656                         break;
2657
2658 #ifdef OHCI1394_DEBUG
2659                 if (datasize)
2660                         if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2661                                 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2662                                        "ack=0x%X spd=%d dataLength=%d ctx=%d",
2663                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2664                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2665                                        status&0x1f, (status>>5)&0x3,
2666                                        le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2667                                        d->ctx);
2668                         else
2669                                 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2670                                        "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2671                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2672                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2673                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2674                                        status&0x1f, (status>>5)&0x3,
2675                                        le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2676                                        d->ctx);
                } else {
2678                         DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2679                                "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2680                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2681                                         >>16)&0x3f,
2682                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2683                                         >>4)&0xf,
2684                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2685                                         >>10)&0x3f,
2686                                 status&0x1f, (status>>5)&0x3,
2687                                 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
                                d->ctx);
                }
#endif
2690
2691                 if (status & 0x10) {
2692                         ack = status & 0xf;
2693                 } else {
                        switch (status & 0x1f) {
                        case EVT_NO_STATUS:   /* should never happen */
                        case EVT_RESERVED_A:  /* should never happen */
                        case EVT_LONG_PACKET: /* should never happen */
                        case EVT_OVERRUN:     /* should never happen */
                        case EVT_BUS_RESET:   /* should never happen */
                        case EVT_RESERVED_B:  /* should never happen */
                        case EVT_RESERVED_C:  /* should never happen */
                                PRINT(KERN_WARNING,
                                      "Received OHCI evt_* error 0x%x",
                                      status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_MISSING_ACK:
                        case EVT_TIMEOUT:
                                ack = ACKX_TIMEOUT;
                                break;
                        case EVT_UNDERRUN:
                        case EVT_DESCRIPTOR_READ:
                        case EVT_DATA_READ:
                        case EVT_DATA_WRITE:
                        case EVT_TCODE_ERR:
                        case EVT_UNKNOWN:
                        case EVT_FLUSHED:
                                ack = ACKX_SEND_ERROR;
                                break;
                        default:
                                PRINT(KERN_ERR,
                                      "Unhandled OHCI evt_* error 0x%x",
                                      status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                BUG();
                        }
2740                 }
2741
2742                 list_del_init(&packet->driver_list);
2743                 hpsb_packet_sent(ohci->host, packet, ack);
2744
2745                 if (datasize)
                        pci_unmap_single(ohci->dev,
                                         le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
                                         datasize, PCI_DMA_TODEVICE);
2749
2750                 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2751                 d->free_prgs++;
2752         }
2753
2754         dma_trm_flush(ohci, d);
2755
2756         spin_unlock_irqrestore(&d->lock, flags);
2757 }
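
/*
 * Minimal sketch of the xferStatus decode performed above, assuming the
 * OHCI ContextControl event encoding (illustrative only; map_evt_to_ackx
 * is a hypothetical helper, not part of this driver):
 *
 *	if (status & 0x10)
 *		ack = status & 0xf;	// evt_ack_*: low nibble is the
 *					// IEEE 1394 ack code itself
 *	else
 *		ack = map_evt_to_ackx(status & 0x1f);	// error evt -> ACKX_*
 */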
2758
2759 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2760 {
2761         int i;
2762         struct ti_ohci *ohci = d->ohci;
2763
2764         if (ohci == NULL)
2765                 return;
2766
2767         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2768
2769         if (d->buf_cpu) {
2770                 for (i=0; i<d->num_desc; i++)
2771                         if (d->buf_cpu[i] && d->buf_bus[i])
2772                                 pci_free_consistent(
2773                                         ohci->dev, d->buf_size,
2774                                         d->buf_cpu[i], d->buf_bus[i]);
2775                 kfree(d->buf_cpu);
2776                 kfree(d->buf_bus);
2777         }
2778         if (d->prg_cpu) {
2779                 for (i=0; i<d->num_desc; i++)
2780                         if (d->prg_cpu[i] && d->prg_bus[i])
2781                                 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2782                                               d->prg_bus[i]);
2783                 pci_pool_destroy(d->prg_pool);
2784                 kfree(d->prg_cpu);
2785                 kfree(d->prg_bus);
2786         }
2787         kfree(d->spb);
2788
2789         /* Mark this context as freed. */
2790         d->ohci = NULL;
2791 }
2792
2793 static int
2794 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2795                   enum context_type type, int ctx, int num_desc,
2796                   int buf_size, int split_buf_size, int context_base)
2797 {
        int i;
2799         static int num_allocs;
2800         static char pool_name[20];
2801
2802         d->ohci = ohci;
2803         d->type = type;
2804         d->ctx = ctx;
2805
2806         d->num_desc = num_desc;
2807         d->buf_size = buf_size;
2808         d->split_buf_size = split_buf_size;
2809
2810         d->ctrlSet = 0;
2811         d->ctrlClear = 0;
2812         d->cmdPtr = 0;
2813
2814         d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2815         d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2816
2817         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2818                 PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
2819                 free_dma_rcv_ctx(d);
2820                 return -ENOMEM;
2821         }
2822
2823         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2824         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2825
2826         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2827                 PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
2828                 free_dma_rcv_ctx(d);
2829                 return -ENOMEM;
2830         }
2831
2832         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2833
2834         if (d->spb == NULL) {
2835                 PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
2836                 free_dma_rcv_ctx(d);
2837                 return -ENOMEM;
2838         }

        snprintf(pool_name, sizeof(pool_name), "ohci1394_rcv_prg%d", num_allocs);
2842         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2843                                 sizeof(struct dma_cmd), 4, 0);
        if (d->prg_pool == NULL) {
2846                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2847                 free_dma_rcv_ctx(d);
2848                 return -ENOMEM;
2849         }
2850         num_allocs++;
2851
2852         for (i=0; i<d->num_desc; i++) {
2853                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2854                                                      d->buf_size,
2855                                                      d->buf_bus+i);
2856
2857                 if (d->buf_cpu[i] != NULL) {
2858                         memset(d->buf_cpu[i], 0, d->buf_size);
2859                 } else {
2860                         PRINT(KERN_ERR,
2861                               "Failed to allocate %s", "DMA buffer");
2862                         free_dma_rcv_ctx(d);
2863                         return -ENOMEM;
2864                 }
2865
2866                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2867
2868                 if (d->prg_cpu[i] != NULL) {
2869                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2870                 } else {
2871                         PRINT(KERN_ERR,
2872                               "Failed to allocate %s", "DMA prg");
2873                         free_dma_rcv_ctx(d);
2874                         return -ENOMEM;
2875                 }
2876         }
2877
2878         spin_lock_init(&d->lock);
2879
2880         d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2881         d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2882         d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2883
2884         tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
2885         return 0;
2886 }
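
/*
 * Allocation layout note (descriptive only): each of the num_desc ring
 * entries owns one buf_size receive buffer from pci_alloc_consistent()
 * plus one struct dma_cmd program from the per-context pci_pool, while
 * d->spb is a single split_buf_size scratch buffer in which packets
 * that straddle two receive buffers are reassembled before being
 * handed to hpsb_packet_received().
 */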
2887
2888 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2889 {
2890         int i;
2891         struct ti_ohci *ohci = d->ohci;
2892
2893         if (ohci == NULL)
2894                 return;
2895
2896         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
2897
2898         if (d->prg_cpu) {
2899                 for (i=0; i<d->num_desc; i++)
2900                         if (d->prg_cpu[i] && d->prg_bus[i])
2901                                 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2902                                               d->prg_bus[i]);
2903                 pci_pool_destroy(d->prg_pool);
2904                 kfree(d->prg_cpu);
2905                 kfree(d->prg_bus);
2906         }
2907
2908         /* Mark this context as freed. */
2909         d->ohci = NULL;
2910 }
2911
2912 static int
2913 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2914                   enum context_type type, int ctx, int num_desc,
2915                   int context_base)
2916 {
        int i;
        static char pool_name[20];
        static int num_allocs;
2920
2921         d->ohci = ohci;
2922         d->type = type;
2923         d->ctx = ctx;
2924         d->num_desc = num_desc;
2925         d->ctrlSet = 0;
2926         d->ctrlClear = 0;
2927         d->cmdPtr = 0;
2928
2929         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
2930         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2931
2932         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2933                 PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
2934                 free_dma_trm_ctx(d);
2935                 return -ENOMEM;
2936         }
2937
        snprintf(pool_name, sizeof(pool_name), "ohci1394_trm_prg%d", num_allocs);
2940         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2941                                 sizeof(struct at_dma_prg), 4, 0);
2942         if (d->prg_pool == NULL) {
2943                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2944                 free_dma_trm_ctx(d);
2945                 return -ENOMEM;
2946         }
2947         num_allocs++;
2948
2949         for (i = 0; i < d->num_desc; i++) {
2950                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2951
2952                 if (d->prg_cpu[i] != NULL) {
2953                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2954                 } else {
2955                         PRINT(KERN_ERR,
2956                               "Failed to allocate %s", "AT DMA prg");
2957                         free_dma_trm_ctx(d);
2958                         return -ENOMEM;
2959                 }
2960         }
2961
2962         spin_lock_init(&d->lock);
2963
        /* Set up the MMIO register offsets and the bottom-half tasklet. */
2965         d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2966         d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2967         d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2968         tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
2969         return 0;
2970 }
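
/*
 * Naming note (descriptive only): the static num_allocs counter gives
 * each context's pci_pool a distinct name -- "ohci1394_trm_prg0",
 * "ohci1394_trm_prg1", and so on -- so the pools can be told apart in
 * pool diagnostics.
 */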
2971
2972 static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
2973 {
2974         struct ti_ohci *ohci = host->hostdata;
2975
2976         reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
2977         reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
2978
2979         memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
2980 }
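
/*
 * Note (descriptive only): the controller answers reads of the first
 * config ROM quadlets from dedicated registers, which is why quadlet 0
 * is mirrored into ConfigROMhdr and quadlet 2 (the bus options) into
 * BusOptions in addition to the in-memory copy.
 */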
2981
2982
2983 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
2984                                  quadlet_t data, quadlet_t compare)
2985 {
2986         struct ti_ohci *ohci = host->hostdata;
2987         int i;
2988
2989         reg_write(ohci, OHCI1394_CSRData, data);
2990         reg_write(ohci, OHCI1394_CSRCompareData, compare);
2991         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
2992
2993         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
2994                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
2995                         break;
2996
2997                 mdelay(1);
2998         }
2999
3000         return reg_read(ohci, OHCI1394_CSRData);
3001 }
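
/*
 * Usage sketch for the compare-swap above (hypothetical caller; per
 * OHCI, csrSel 0..3 selects BUS_MANAGER_ID, BANDWIDTH_AVAILABLE and
 * CHANNELS_AVAILABLE_HI/LO, and CSRData holds the old value once the
 * operation completes).  Claiming bus-manager-ship could look like:
 *
 *	quadlet_t old = ohci_hw_csr_reg(host, 0, our_node_id, 0x3f);
 *	if (old == 0x3f)
 *		;	// swap succeeded: we are now the bus manager
 */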
3002
3003 static struct hpsb_host_driver ohci1394_driver = {
3004         .owner =                THIS_MODULE,
3005         .name =                 OHCI1394_DRIVER_NAME,
3006         .set_hw_config_rom =    ohci_set_hw_config_rom,
3007         .transmit_packet =      ohci_transmit,
3008         .devctl =               ohci_devctl,
3009         .isoctl =               ohci_isoctl,
3010         .hw_csr_reg =           ohci_hw_csr_reg,
3011 };
3012
3013 /***********************************
3014  * PCI Driver Interface functions  *
3015  ***********************************/
3016
3017 #ifdef CONFIG_PPC_PMAC
3018 static void ohci1394_pmac_on(struct pci_dev *dev)
3019 {
3020         if (machine_is(powermac)) {
3021                 struct device_node *ofn = pci_device_to_OF_node(dev);
3022
3023                 if (ofn) {
3024                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3025                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3026                 }
3027         }
3028 }
3029
3030 static void ohci1394_pmac_off(struct pci_dev *dev)
3031 {
3032         if (machine_is(powermac)) {
3033                 struct device_node *ofn = pci_device_to_OF_node(dev);
3034
3035                 if (ofn) {
3036                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3037                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3038                 }
3039         }
3040 }
3041 #else
3042 #define ohci1394_pmac_on(dev)
3043 #define ohci1394_pmac_off(dev)
3044 #endif /* CONFIG_PPC_PMAC */
3045
3046 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3047                                         const struct pci_device_id *ent)
3048 {
3049         struct hpsb_host *host;
3050         struct ti_ohci *ohci;   /* shortcut to currently handled device */
3051         resource_size_t ohci_base;
3052         int err = -ENOMEM;
3053
3054         ohci1394_pmac_on(dev);
3055         if (pci_enable_device(dev)) {
3056                 PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
3057                 err = -ENXIO;
3058                 goto err;
3059         }
3060         pci_set_master(dev);
3061
3062         host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3063         if (!host) {
3064                 PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
3065                 goto err;
3066         }
3067         ohci = host->hostdata;
3068         ohci->dev = dev;
3069         ohci->host = host;
3070         ohci->init_state = OHCI_INIT_ALLOC_HOST;
3071         host->pdev = dev;
3072         pci_set_drvdata(dev, ohci);
3073
3074         /* We don't want hardware swapping */
3075         pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3076
3077         /* Some oddball Apple controllers do not order the selfid
3078          * properly, so we make up for it here.  */
3079 #ifndef __LITTLE_ENDIAN
3080         /* XXX: Need a better way to check this. I'm wondering if we can
3081          * read the values of the OHCI1394_PCI_HCI_Control and the
3082          * noByteSwapData registers to see if they were not cleared to
3083          * zero. Should this work? Obviously it's not defined what these
3084          * registers will read when they aren't supported. Bleh! */
3085         if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3086             dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3087                 ohci->no_swap_incoming = 1;
3088                 ohci->selfid_swap = 0;
3089         } else
3090                 ohci->selfid_swap = 1;
3091 #endif
3092
3093
3094 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3095 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3096 #endif
3097
        /* These chipsets require a bit of extra care when checking
         * after a bus reset. */
3100         if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3101              dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3102             (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
3103              dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3104                 ohci->check_busreset = 1;
3105
3106         /* We hardwire the MMIO length, since some CardBus adaptors
3107          * fail to report the right length.  Anyway, the ohci spec
3108          * clearly says it's 2kb, so this shouldn't be a problem. */
3109         ohci_base = pci_resource_start(dev, 0);
3110         if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3111                 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3112                       (unsigned long long)pci_resource_len(dev, 0));
3113
3114         if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3115                                 OHCI1394_DRIVER_NAME)) {
3116                 PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
3117                         (unsigned long long)ohci_base,
3118                         (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3119                 goto err;
3120         }
3121         ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3122
3123         ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3124         if (ohci->registers == NULL) {
3125                 PRINT_G(KERN_ERR, "Failed to remap registers");
3126                 err = -ENXIO;
3127                 goto err;
3128         }
3129         ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3130         DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3131
3132         /* csr_config rom allocation */
3133         ohci->csr_config_rom_cpu =
3134                 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3135                                      &ohci->csr_config_rom_bus);
3136         if (ohci->csr_config_rom_cpu == NULL) {
3137                 PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
3138                 goto err;
3139         }
3140         ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3141
3142         /* self-id dma buffer allocation */
3143         ohci->selfid_buf_cpu =
3144                 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3145                       &ohci->selfid_buf_bus);
3146         if (ohci->selfid_buf_cpu == NULL) {
3147                 PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
3148                 goto err;
3149         }
3150         ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3151
        if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
                PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
                      "8 KB boundary... may cause problems on some CXD3222 chips",
                      ohci->selfid_buf_cpu);
3156
3157         /* No self-id errors at startup */
3158         ohci->self_id_errors = 0;
3159
3160         ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3161         /* AR DMA request context allocation */
3162         if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3163                               DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3164                               AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3165                               OHCI1394_AsReqRcvContextBase) < 0) {
3166                 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
3167                 goto err;
3168         }
3169         /* AR DMA response context allocation */
3170         if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3171                               DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3172                               AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3173                               OHCI1394_AsRspRcvContextBase) < 0) {
3174                 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
3175                 goto err;
3176         }
3177         /* AT DMA request context */
3178         if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3179                               DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3180                               OHCI1394_AsReqTrContextBase) < 0) {
3181                 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
3182                 goto err;
3183         }
3184         /* AT DMA response context */
3185         if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3186                               DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3187                               OHCI1394_AsRspTrContextBase) < 0) {
3188                 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
3189                 goto err;
3190         }
3191         /* Start off with a soft reset, to clear everything to a sane
3192          * state. */
3193         ohci_soft_reset(ohci);
3194
3195         /* Now enable LPS, which we need in order to start accessing
3196          * most of the registers.  In fact, on some cards (ALI M5251),
3197          * accessing registers in the SClk domain without LPS enabled
3198          * will lock up the machine. */
3199         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3200
3201         /* Disable and clear interrupts */
3202         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3203         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3204
3205         /* Flush MMIO writes and wait to make sure we have full link enabled. */
3206         reg_read(ohci, OHCI1394_Version);
3207         msleep(50);
3208
3209         /* Determine the number of available IR and IT contexts. */
3210         ohci->nb_iso_rcv_ctx =
3211                 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3212         ohci->nb_iso_xmit_ctx =
3213                 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3214
3215         /* Set the usage bits for non-existent contexts so they can't
3216          * be allocated */
3217         ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3218         ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
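        /* Worked example (illustrative): with 8 receive contexts,
         * ~0 << 8 sets every usage bit except bits 0..7, so only
         * contexts 0..7 can ever be handed out by the allocator. */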
3219
3220         INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3221         spin_lock_init(&ohci->iso_tasklet_list_lock);
3222         ohci->ISO_channel_usage = 0;
3223         spin_lock_init(&ohci->IR_channel_lock);
3224
3225         spin_lock_init(&ohci->event_lock);
3226
3227         /*
3228          * interrupts are disabled, all right, but... due to IRQF_SHARED we
3229          * might get called anyway.  We'll see no event, of course, but
3230          * we need to get to that "no event", so enough should be initialized
3231          * by that point.
3232          */
3233         err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3234                           OHCI1394_DRIVER_NAME, ohci);
3235         if (err) {
3236                 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3237                 goto err;
3238         }
3239         ohci->init_state = OHCI_INIT_HAVE_IRQ;
3240         ohci_initialize(ohci);
3241
3242         /* Set certain csr values */
3243         host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3244         host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3245         host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
3246         host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3247         host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3248
3249         if (phys_dma) {
3250                 host->low_addr_space =
3251                         (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3252                 if (!host->low_addr_space)
3253                         host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3254         }
3255         host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3256
3257         /* Tell the highlevel this host is ready */
3258         if (hpsb_add_host(host)) {
3259                 PRINT_G(KERN_ERR, "Failed to register host with highlevel");
3260                 goto err;
3261         }
3262         ohci->init_state = OHCI_INIT_DONE;
3263
3264         return 0;
3265 err:
3266         ohci1394_pci_remove(dev);
3267         return err;
3268 }
3269
3270 static void ohci1394_pci_remove(struct pci_dev *dev)
3271 {
3272         struct ti_ohci *ohci;
3273         struct device *device;
3274
3275         ohci = pci_get_drvdata(dev);
3276         if (!ohci)
3277                 goto out;
3278
3279         device = get_device(&ohci->host->device);
3280
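        /* Each case below deliberately falls through to the next, so a
         * given init state releases its own resource and then every
         * resource acquired before it. */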
3281         switch (ohci->init_state) {
3282         case OHCI_INIT_DONE:
3283                 hpsb_remove_host(ohci->host);
3284
3285                 /* Clear out BUS Options */
3286                 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3287                 reg_write(ohci, OHCI1394_BusOptions,
3288                           (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3289                           0x00ff0000);
3290                 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3291
3292         case OHCI_INIT_HAVE_IRQ:
3293                 /* Clear interrupt registers */
3294                 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3295                 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3296                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3297                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3298                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3299                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3300
3301                 /* Disable IRM Contender */
3302                 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3303
3304                 /* Clear link control register */
3305                 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3306
3307                 /* Let all other nodes know to ignore us */
3308                 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3309
3310                 /* Soft reset before we start - this disables
3311                  * interrupts and clears linkEnable and LPS. */
3312                 ohci_soft_reset(ohci);
3313                 free_irq(dev->irq, ohci);
3314
3315         case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
                /* ohci_soft_reset() has already stopped all DMA contexts,
                 * so we don't need to stop them here. */
3318                 free_dma_rcv_ctx(&ohci->ar_req_context);
3319                 free_dma_rcv_ctx(&ohci->ar_resp_context);
3320                 free_dma_trm_ctx(&ohci->at_req_context);
3321                 free_dma_trm_ctx(&ohci->at_resp_context);
3322
3323         case OHCI_INIT_HAVE_SELFID_BUFFER:
3324                 pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
3325                                     ohci->selfid_buf_cpu,
3326                                     ohci->selfid_buf_bus);
3327
3328         case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3329                 pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
3330                                     ohci->csr_config_rom_cpu,
3331                                     ohci->csr_config_rom_bus);
3332
3333         case OHCI_INIT_HAVE_IOMAPPING:
3334                 iounmap(ohci->registers);
3335
3336         case OHCI_INIT_HAVE_MEM_REGION:
3337                 release_mem_region(pci_resource_start(dev, 0),
3338                                    OHCI1394_REGISTER_SIZE);
3339
3340         case OHCI_INIT_ALLOC_HOST:
3341                 pci_set_drvdata(dev, NULL);
3342         }
3343
3344         if (device)
3345                 put_device(device);
3346 out:
3347         ohci1394_pmac_off(dev);
3348 }
3349
3350 #ifdef CONFIG_PM
3351 static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
3352 {
3353         int err;
3354         struct ti_ohci *ohci = pci_get_drvdata(dev);
3355
3356         if (!ohci) {
                printk(KERN_ERR "%s: tried to suspend nonexistent host\n",
                       OHCI1394_DRIVER_NAME);
3359                 return -ENXIO;
3360         }
3361         DBGMSG("suspend called");
3362
3363         /* Clear the async DMA contexts and stop using the controller */
3364         hpsb_bus_reset(ohci->host);
3365
3366         /* See ohci1394_pci_remove() for comments on this sequence */
3367         reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3368         reg_write(ohci, OHCI1394_BusOptions,
3369                   (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3370                   0x00ff0000);
3371         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3372         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3373         reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3374         reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3375         reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3376         reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3377         set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3378         reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3379         ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3380         ohci_soft_reset(ohci);
3381
3382         free_irq(dev->irq, ohci);
3383         err = pci_save_state(dev);
3384         if (err) {
3385                 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3386                 return err;
3387         }
3388         err = pci_set_power_state(dev, pci_choose_state(dev, state));
3389         if (err)
3390                 DBGMSG("pci_set_power_state failed with %d", err);
3391         ohci1394_pmac_off(dev);
3392
3393         return 0;
3394 }
3395
3396 static int ohci1394_pci_resume(struct pci_dev *dev)
3397 {
3398         int err;
3399         struct ti_ohci *ohci = pci_get_drvdata(dev);
3400
3401         if (!ohci) {
                printk(KERN_ERR "%s: tried to resume nonexistent host\n",
                       OHCI1394_DRIVER_NAME);
3404                 return -ENXIO;
3405         }
3406         DBGMSG("resume called");
3407
3408         ohci1394_pmac_on(dev);
3409         pci_set_power_state(dev, PCI_D0);
3410         pci_restore_state(dev);
3411         err = pci_enable_device(dev);
3412         if (err) {
3413                 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3414                 return err;
3415         }
3416
3417         /* See ohci1394_pci_probe() for comments on this sequence */
3418         ohci_soft_reset(ohci);
3419         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3420         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3421         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3422         reg_read(ohci, OHCI1394_Version);
3423         msleep(50);
3424
3425         err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3426                           OHCI1394_DRIVER_NAME, ohci);
3427         if (err) {
3428                 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3429                 return err;
3430         }
3431
3432         ohci_initialize(ohci);
3433
3434         hpsb_resume_host(ohci->host);
3435         return 0;
3436 }
3437 #endif /* CONFIG_PM */
3438
3439 static struct pci_device_id ohci1394_pci_tbl[] = {
3440         {
3441                 .class =        PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3442                 .class_mask =   PCI_ANY_ID,
3443                 .vendor =       PCI_ANY_ID,
3444                 .device =       PCI_ANY_ID,
3445                 .subvendor =    PCI_ANY_ID,
3446                 .subdevice =    PCI_ANY_ID,
3447         },
3448         { 0, },
3449 };
3450
3451 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3452
3453 static struct pci_driver ohci1394_pci_driver = {
3454         .name =         OHCI1394_DRIVER_NAME,
3455         .id_table =     ohci1394_pci_tbl,
3456         .probe =        ohci1394_pci_probe,
3457         .remove =       ohci1394_pci_remove,
3458 #ifdef CONFIG_PM
3459         .resume =       ohci1394_pci_resume,
3460         .suspend =      ohci1394_pci_suspend,
3461 #endif
3462 };
3463
3464 /***********************************
3465  * OHCI1394 Video Interface        *
3466  ***********************************/
3467
/* Essentially the only purpose of this code is to allow another module
 * to hook into ohci's interrupt handler. */
3470
3471 /* returns zero if successful, one if DMA context is locked up */
3472 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3473 {
        int i = 0;
3475
3476         /* stop the channel program if it's still running */
3477         reg_write(ohci, reg, 0x8000);
3478
3479         /* Wait until it effectively stops */
3480         while (reg_read(ohci, reg) & 0x400) {
3481                 i++;
                if (i > 5000) {
3483                         PRINT(KERN_ERR,
3484                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3485                         return 1;
3486                 }
3487
3488                 mb();
3489                 udelay(10);
3490         }
        if (msg)
                PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3492         return 0;
3493 }
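
/*
 * Usage sketch (hypothetical caller, e.g. a client module stopping its
 * iso receive context before recycling buffers; "d" stands for any
 * context with a valid ctrlClear register offset):
 *
 *	if (ohci1394_stop_context(ohci, d->ctrlClear, "iso recv"))
 *		;	// context is wedged, skip the recycling
 */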
3494
3495 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3496                                void (*func)(unsigned long), unsigned long data)
3497 {
3498         tasklet_init(&tasklet->tasklet, func, data);
3499         tasklet->type = type;
3500         /* We init the tasklet->link field, so we can list_del() it
3501          * without worrying whether it was added to the list or not. */
3502         INIT_LIST_HEAD(&tasklet->link);
3503 }
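
/*
 * Minimal sketch of a client module hooking into the interrupt handler
 * through the iso tasklet API (hypothetical callback and data, error
 * handling trimmed):
 *
 *	static void my_iso_bh(unsigned long data) { ... }
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_RECEIVE, my_iso_bh, 0);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) == 0)
 *		ctx = t.context;	// hardware context we were assigned
 *	...
 *	ohci1394_unregister_iso_tasklet(ohci, &t);
 */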
3504
3505 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3506                                   struct ohci1394_iso_tasklet *tasklet)
3507 {
3508         unsigned long flags, *usage;
3509         int n, i, r = -EBUSY;
3510
3511         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3512                 n = ohci->nb_iso_xmit_ctx;
3513                 usage = &ohci->it_ctx_usage;
        } else {
3516                 n = ohci->nb_iso_rcv_ctx;
3517                 usage = &ohci->ir_ctx_usage;
3518
3519                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3520                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3521                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3522                                 return r;
3523                         }
3524                 }
3525         }
3526
3527         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3528
3529         for (i = 0; i < n; i++)
3530                 if (!test_and_set_bit(i, usage)) {
3531                         tasklet->context = i;
3532                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3533                         r = 0;
3534                         break;
3535                 }
3536
3537         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3538
3539         return r;
3540 }
3541
3542 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3543                                      struct ohci1394_iso_tasklet *tasklet)
3544 {
3545         unsigned long flags;
3546
3547         tasklet_kill(&tasklet->tasklet);
3548
3549         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3550
        if (tasklet->type == OHCI_ISO_TRANSMIT) {
                clear_bit(tasklet->context, &ohci->it_ctx_usage);
        } else {
3554                 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3555
3556                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3557                         clear_bit(0, &ohci->ir_multichannel_used);
3558                 }
3559         }
3560
3561         list_del(&tasklet->link);
3562
3563         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3564 }
3565
3566 EXPORT_SYMBOL(ohci1394_stop_context);
3567 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3568 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3569 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3570
3571 /***********************************
3572  * General module initialization   *
3573  ***********************************/
3574
3575 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3576 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3577 MODULE_LICENSE("GPL");
3578
static void __exit ohci1394_cleanup(void)
3580 {
3581         pci_unregister_driver(&ohci1394_pci_driver);
3582 }
3583
3584 static int __init ohci1394_init(void)
3585 {
3586         return pci_register_driver(&ohci1394_pci_driver);
3587 }
3588
3589 module_init(ohci1394_init);
3590 module_exit(ohci1394_cleanup);