/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
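/* Note: with phys_dma enabled, remote nodes may read and write host memory
 * directly through the controller's physical request filter (which is
 * programmed by the interrupt handler; see the physical response unit setup
 * in ohci_initialize below). Setting the parameter to 0 forces all requests
 * through the AR DMA contexts instead. */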
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};

/* Swap the header quadlets of a packet to bus byte order. Validate the
 * tcode before using it as an index into hdr_sizes. */
static inline void packet_swab(quadlet_t *data, int tcode)
{
	size_t size;

	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = swab32(data[size]);
}
#else
/* Don't waste cycles on byte swaps when host and bus byte order already match */
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
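/* PHY register access goes through the PhyControl register: writing
 * (addr << 8) | 0x00008000 issues a read request, whose completion is
 * signalled by bit 31 (rdDone) with the data in bits 16-23; writing
 * (addr << 8) | data | 0x00004000 issues a write request, which the
 * controller acknowledges by clearing bit 14 (wrReg). See the PhyControl
 * register description in the OHCI 1394 specification. */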
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* OR our value into the current value of a PHY register */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);
}
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");
}
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
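/* Note on the receive program above: the low four bits of each branchAddress
 * carry the Z value of the block being linked to. Z=1 links each descriptor
 * to the next; the final descriptor points back to the first one with Z=0,
 * so the context stalls at the wrap point until software releases buffer
 * space and wakes it up again. */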
/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}
/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}
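/* The trick above: writing all ones to an iso interrupt mask set register
 * and reading it back leaves a bit set only for each context the chip
 * actually implements, so counting the set bits gives the number of usable
 * IR or IT contexts. */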
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |= 0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
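	/* The max_rec field occupies bits 12-15 of BusOptions; per IEEE 1394
	 * it encodes the largest asynchronous payload as 2^(max_rec + 1)
	 * bytes, hence the shift and +1 above. */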
	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the ISO recv interrupt mask and pending events */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the ISO xmit interrupt mask and pending events */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	sprintf (irq_buf, "%d", ohci->dev->irq);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
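		/* The timeStamp field holds 3 bits of cycleSeconds above 13
		 * bits of cycleCount: take the current seconds from the
		 * cycle timer (bits 25-27), add one, and keep the current
		 * cycle number, which gives the response roughly one second
		 * before the controller expires it. */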
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);

		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
	d->free_prgs--;
}
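/* The value OR'ed into the branch address above (0x2 or 0x3) is the Z field:
 * the number of descriptors in the program block being linked to. Quadlet
 * and PHY transmits use a single two-descriptor immediate program (Z=2),
 * while block and iso transmits append a payload descriptor (Z=3);
 * dma_trm_flush() passes the same Z when it starts the context. */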
/*
 * This function fills the FIFO with any pending packets and runs or
 * wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use. However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		if (!ir_legacy_active) {
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			if (printk_ratelimit())
				DBGMSG("IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
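/* Illustrative example (numbers are not from the code): with a 64 KB buffer
 * and 4 KB pages, buf_size/PAGE_SIZE is 16, so 15 blocks are used for DMA
 * and the 16th page is the reserve; a packet wrapping past the last block
 * has its tail copied into that guard page, so the reader still sees one
 * contiguous range. See ohci_iso_recv_bufferfill_parse below. */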
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
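/* Why the two commands differ: in buffer-fill mode each descriptor is an
 * INPUT_MORE because a received packet may spill across block boundaries,
 * while in packet-per-buffer mode each descriptor is an INPUT_LAST so that
 * every packet is confined to, and terminates in, its own block. */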
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}
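	/* The cycleMatch field assembled above is 15 bits: the two low-order
	 * bits of cycleSeconds sit in bits 13-14 above the 13-bit cycle
	 * number, so a start point can be placed anywhere within a
	 * four-second window. */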
	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests */
	if ((block < 0) || (block > recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
					recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}
/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);
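	/* 0x02000008 decodes as: cmd = OUTPUT_MORE (0) in bits 28-31,
	 * key = "immediate" (2) in bits 24-26, reqCount = 8 - i.e. the eight
	 * header bytes filled into iso_hdr[] below are transmitted straight
	 * out of the descriptor block itself. */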
	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2132 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2134 struct ohci_iso_xmit *xmit = iso->hostdata;
2135 struct ti_ohci *ohci = xmit->ohci;
2137 /* clear out the control register */
2138 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2141 /* address and length of first descriptor block (Z=3) */
2142 reg_write(xmit->ohci, xmit->CommandPtr,
2143 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2147 u32 start = cycle & 0x1FFF;
2149 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2150 just snarf them from the current time */
2151 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2153 /* advance one second to give some extra time for DMA to start */
2156 start |= (seconds & 3) << 13;
2158 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2161 /* enable interrupts */
2162 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2165 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	/* wait 100 usec to give the card time to go active */
	udelay(100);
2171 /* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{
	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
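/* Note: the 'arg' parameter above is overloaded per command -- a starting
 * cycle for XMIT_START, a struct hpsb_iso_packet_info pointer for
 * XMIT_QUEUE and RECV_RELEASE, a channel number for the listen/unlisten
 * commands, a pointer to a u64 mask for RECV_SET_CHANNEL_MASK -- mirroring
 * how the ieee1394 core invokes the isoctl hook. */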
2234 /***************************************
2235 * IEEE-1394 functionality section END *
2236 ***************************************/
2239 /********************************************************
2240 * Global stuff (interrupt handler, init/shutdown code) *
2241 ********************************************************/
2243 static void dma_trm_reset(struct dma_trm_ctx *d)
2245 unsigned long flags;
2246 LIST_HEAD(packet_list);
2247 struct ti_ohci *ohci = d->ohci;
2248 struct hpsb_packet *packet, *ptmp;
2250 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2252 /* Lock the context, reset it and release it. Move the packets
2253 * that were pending in the context to packet_list and free
2254 * them after releasing the lock. */
2256 spin_lock_irqsave(&d->lock, flags);
2258 list_splice(&d->fifo_list, &packet_list);
2259 list_splice(&d->pending_list, &packet_list);
2260 INIT_LIST_HEAD(&d->fifo_list);
2261 INIT_LIST_HEAD(&d->pending_list);
2263 d->branchAddrPtr = NULL;
2264 d->sent_ind = d->prg_ind;
2265 d->free_prgs = d->num_desc;
2267 spin_unlock_irqrestore(&d->lock, flags);
	if (list_empty(&packet_list))
		return;
2272 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
	/* Now process subsystem callbacks for the packets from this
	 * context. */
2276 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2277 list_del_init(&packet->driver_list);
2278 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
				       quadlet_t rx_event,
				       quadlet_t tx_event)
{
2286 struct ohci1394_iso_tasklet *t;
	unsigned long mask;
	unsigned long flags;
2290 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2292 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2293 mask = 1 << t->context;
2295 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2296 tasklet_schedule(&t->tasklet);
2297 else if (rx_event & mask)
2298 tasklet_schedule(&t->tasklet);
	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
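/* Each isochronous context owns one bit in the IsoRecv/IsoXmit interrupt
 * event registers; e.g. a tasklet registered for context 3 is matched by
 * mask 1 << 3.  That is what ties the hardware event words passed in here
 * to the tasklets registered via ohci1394_register_iso_tasklet(). */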
2304 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2305 struct pt_regs *regs_are_unused)
2307 quadlet_t event, node_id;
2308 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2309 struct hpsb_host *host = ohci->host;
2310 int phyid = -1, isroot = 0;
2311 unsigned long flags;
2313 /* Read and clear the interrupt event register. Don't clear
2314 * the busReset event, though. This is done when we get the
2315 * selfIDComplete interrupt. */
2316 spin_lock_irqsave(&ohci->event_lock, flags);
2317 event = reg_read(ohci, OHCI1394_IntEventClear);
2318 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2319 spin_unlock_irqrestore(&ohci->event_lock, flags);
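	/* IntEventClear is write-1-to-clear: writing back the bits we just
	 * read acknowledges exactly those events and no others. */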
	/* If event is ~(u32)0, the CardBus card was ejected.  In this
	 * case we just return and clean up in ohci1394_pci_remove(). */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}
2332 DBGMSG("IntEvent: %08x", event);
	if (event & OHCI1394_unrecoverableError) {
		int ctx;

		PRINT(KERN_ERR, "Unrecoverable error!");
2338 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2339 PRINT(KERN_ERR, "Async Req Tx Context died: "
2340 "ctrl[%08x] cmdptr[%08x]",
2341 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2342 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2344 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2345 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2346 "ctrl[%08x] cmdptr[%08x]",
2347 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2348 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2350 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2351 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2352 "ctrl[%08x] cmdptr[%08x]",
2353 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2354 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2356 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2357 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2358 "ctrl[%08x] cmdptr[%08x]",
2359 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2360 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2362 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2363 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2364 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2365 "ctrl[%08x] cmdptr[%08x]", ctx,
2366 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
				      reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}
2370 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2371 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2372 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2373 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2374 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2375 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2376 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
2381 if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet; it would have to involve
		 * the protocol drivers */
	}
2385 if (event & OHCI1394_cycleInconsistent) {
2386 /* We subscribe to the cycleInconsistent event only to
2387 * clear the corresponding event bit... otherwise,
2388 * isochronous cycleMatch DMA won't work. */
2389 DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
2392 if (event & OHCI1394_busReset) {
2393 /* The busReset event bit can't be cleared during the
2394 * selfID phase, so we disable busReset interrupts, to
2395 * avoid burying the cpu in interrupt requests. */
2396 spin_lock_irqsave(&ohci->event_lock, flags);
2397 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);
2404 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2405 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2407 spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);
				/* The loop counter check prevents the driver
				 * from remaining in this state forever.  On
				 * the initial bus reset the loop would
				 * otherwise run forever, hanging the system
				 * until a device is manually plugged in or
				 * out.  The forced reset seems to solve the
				 * problem.  This mainly affects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
2427 if (!host->in_bus_reset) {
2428 DBGMSG("irq_handler: Bus reset requested");
2430 /* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
2435 if (event & OHCI1394_reqTxComplete) {
2436 struct dma_trm_ctx *d = &ohci->at_req_context;
2437 DBGMSG("Got reqTxComplete interrupt "
2438 "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
2447 if (event & OHCI1394_respTxComplete) {
2448 struct dma_trm_ctx *d = &ohci->at_resp_context;
2449 DBGMSG("Got respTxComplete interrupt "
2450 "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
2458 if (event & OHCI1394_RQPkt) {
2459 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2460 DBGMSG("Got RQPkt interrupt status=0x%08X",
2461 reg_read(ohci, d->ctrlSet));
2462 if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
2468 if (event & OHCI1394_RSPkt) {
2469 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2470 DBGMSG("Got RSPkt interrupt status=0x%08X",
2471 reg_read(ohci, d->ctrlSet));
2472 if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
2494 if (event & OHCI1394_selfIDComplete) {
2495 if (host->in_bus_reset) {
2496 node_id = reg_read(ohci, OHCI1394_NodeID);
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}
2506 phyid = node_id & 0x0000003f;
2507 isroot = (node_id & 0x40000000) != 0;
2509 DBGMSG("SelfID interrupt received "
2510 "(phyid %d, %s)", phyid,
2511 (isroot ? "root" : "not root"));
2513 handle_selfid(ohci, host, phyid, isroot);
2515 /* Clear the bus reset event and re-enable the
2516 * busReset interrupt. */
2517 spin_lock_irqsave(&ohci->event_lock, flags);
2518 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2519 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2520 spin_unlock_irqrestore(&ohci->event_lock, flags);
			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
				  0xffffffff);
			reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
				  0xffffffff);
2533 DBGMSG("PhyReqFilter=%08x%08x",
2534 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2535 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}
	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for. */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2555 /* Put the buffer back into the dma context */
2556 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2558 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2559 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
	d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
	d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
	idx = (idx + d->num_desc - 1) % d->num_desc;
	d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
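	/* The two lines above terminate the ring at this buffer (Z=0 in
	 * its own branchAddress) and then set Z=1 in the *previous*
	 * descriptor, so the controller branches into the freshly
	 * re-inserted buffer when it finishes the one before it. */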
2566 /* To avoid a race, ensure 1394 interface hardware sees the inserted
	 * context program descriptors before it sees the wakeup bit set. */
	wmb();
2570 /* wake up the dma context if necessary */
	if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
		DBGMSG("Waking dma ctx=%d ... processing is probably too slow",
		       d->ctx);
	}
2577 /* do this always, to avoid race condition */
2578 reg_write(ohci, d->ctrlSet, 0x1000);
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2584 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2585 -1, 0, -1, 0, -1, -1, 16, -1};
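/* One entry per IEEE 1394 tcode: the total packet size in the AR buffer
 * in bytes (header plus the 4-byte status trailer) for fixed-size packets,
 * 0 for block packets whose size must be read out of the packet itself
 * (see packet_length() below), and -1 for tcodes that cannot legally show
 * up in an async receive buffer. */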
2588 * Determine the length of a packet in the buffer
2589 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
				    int offset, unsigned char tcode, int noswap)
{
	int length = -1;

	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
		length = TCODE_SIZE[tcode];
		if (length == 0) {
			if (offset + 12 >= d->buf_size) {
				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
			} else {
				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
			}
			length += 20;
		}
	}
2607 } else if (d->type == DMA_CTX_ISO) {
2608 /* Assumption: buffer fill mode with header/trailer */
2609 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
	}

	if (length > 0 && length % 4)
		length += 4 - (length % 4);

	return length;
}
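/* Worked example: a quadlet read response in an async context has
 * TCODE_SIZE entry 20 (16-byte header plus 4-byte trailer) and needs no
 * padding, while a raw length of 13 would be rounded up to 16 so that the
 * next packet in the buffer stays quadlet-aligned. */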
2618 /* Tasklet that processes dma receive buffers */
2619 static void dma_rcv_tasklet (unsigned long data)
2621 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2622 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2623 unsigned int split_left, idx, offset, rescount;
2624 unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];
2631 spin_lock_irqsave(&d->lock, flags);
	idx = d->buf_ind;
	offset = d->buf_offset;
2635 buf_ptr = d->buf_cpu[idx] + offset/4;
2637 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2638 bytes_left = d->buf_size - rescount - offset;
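	/* resCount in the descriptor status counts down from buf_size as
	 * the controller fills the buffer, so buf_size - rescount is the
	 * hardware's write pointer; subtracting our read offset gives the
	 * number of freshly received bytes to parse.  E.g. buf_size 4096,
	 * rescount 3000, offset 0 -> 1096 bytes of new packets. */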
2640 while (bytes_left > 0) {
2641 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2643 /* packet_length() will return < 4 for an error */
2644 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2646 if (length < 4) { /* something is wrong */
			sprintf(msg, "Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
2655 /* The first case is where we have a packet that crosses
2656 * over more than one descriptor. The next case is where
2657 * it's all in the first descriptor. */
2658 if ((offset + length) > d->buf_size) {
2659 DBGMSG("Split packet rcv'd");
2660 if (length > d->split_buf_size) {
2661 ohci1394_stop_context(ohci, d->ctrlClear,
2662 "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* The other part of the packet has not been
				 * written yet; this should never happen, and
				 * we will pick it up on the next call anyway. */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
2682 split_left = length;
2683 split_ptr = (char *)d->spb;
2684 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2685 split_left -= d->buf_size-offset;
2686 split_ptr += d->buf_size-offset;
2687 insert_dma_buffer(d, idx);
2688 idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset = 0;
2692 while (split_left >= d->buf_size) {
2693 memcpy(split_ptr,buf_ptr,d->buf_size);
2694 split_ptr += d->buf_size;
2695 split_left -= d->buf_size;
2696 insert_dma_buffer(d, idx);
2697 idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset = 0;
			}
2701 if (split_left > 0) {
2702 memcpy(split_ptr, buf_ptr, split_left);
2703 offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
2708 memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
2711 if (offset==d->buf_size) {
2712 insert_dma_buffer(d, idx);
2713 idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset = 0;
			}
		}
		/* We get one phy packet on each bus reset; it lands in the
		 * async request receive context, and we always ignore it. */
2721 if (tcode != OHCI1394_TCODE_PHY) {
2722 if (!ohci->no_swap_incoming)
2723 packet_swab(d->spb, tcode);
2724 DBGMSG("Packet received from node"
2725 " %d ack=0x%02X spd=%d tcode=0x%X"
2726 " length=%d ctx=%d tlabel=%d",
2727 (d->spb[1]>>16)&0x3f,
2728 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2729 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2730 tcode, length, d->ctx,
2731 (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
			       == 0x11) ? 1 : 0;

			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT(KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			      d->ctx);
#endif
2745 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2747 bytes_left = d->buf_size - rescount - offset;
	d->buf_ind = idx;
	d->buf_offset = offset;
2754 spin_unlock_irqrestore(&d->lock, flags);
2757 /* Bottom half that processes sent packets */
2758 static void dma_trm_tasklet (unsigned long data)
2760 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2761 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2762 struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;
2767 spin_lock_irqsave(&d->lock, flags);
2769 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2770 datasize = packet->data_size;
2771 if (datasize && packet->type != hpsb_raw)
2772 status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
2776 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
		if (status == 0) {
			/* this packet hasn't been sent yet */
			break;
		}
#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
			       status&0x1f, (status>>5)&0x3,
			       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
			       d->ctx);
#endif
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			}
		}
2866 list_del_init(&packet->driver_list);
2867 hpsb_packet_sent(ohci->host, packet, ack);
		if (datasize) {
			pci_unmap_single(ohci->dev,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
				datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1) % d->num_desc;
		d->free_prgs++;
	}
2880 dma_trm_flush(ohci, d);
2882 spin_unlock_irqrestore(&d->lock, flags);
2885 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2888 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2890 if (d->type == DMA_CTX_ISO) {
2891 /* disable interrupts */
2892 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
		ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
	} else {
		tasklet_kill(&d->task);
	}
}
2901 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2904 struct ti_ohci *ohci = d->ohci;
2909 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2912 for (i=0; i<d->num_desc; i++)
2913 if (d->buf_cpu[i] && d->buf_bus[i]) {
2914 pci_free_consistent(
2915 ohci->dev, d->buf_size,
2916 d->buf_cpu[i], d->buf_bus[i]);
2917 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2923 for (i=0; i<d->num_desc; i++)
2924 if (d->prg_cpu[i] && d->prg_bus[i]) {
2925 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2926 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2928 pci_pool_destroy(d->prg_pool);
2929 OHCI_DMA_FREE("dma_rcv prg pool");
	/* Mark this context as freed. */
	d->ohci = NULL;
}
2940 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2941 enum context_type type, int ctx, int num_desc,
2942 int buf_size, int split_buf_size, int context_base)
	int i, len;
	static int num_allocs;
	static char pool_name[20];
	d->ohci = ohci;
	d->type = type;
	d->ctx = ctx;

	d->num_desc = num_desc;
2953 d->buf_size = buf_size;
2954 d->split_buf_size = split_buf_size;
2960 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2961 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2963 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2964 PRINT(KERN_ERR, "Failed to allocate dma buffer");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
2969 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2970 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2972 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2973 PRINT(KERN_ERR, "Failed to allocate dma prg");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
2978 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2980 if (d->spb == NULL) {
2981 PRINT(KERN_ERR, "Failed to allocate split buffer");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
2986 len = sprintf(pool_name, "ohci1394_rcv_prg");
2987 sprintf(pool_name+len, "%d", num_allocs);
2988 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2989 sizeof(struct dma_cmd), 4, 0);
	if (d->prg_pool == NULL) {
		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
	num_allocs++;
2998 OHCI_DMA_ALLOC("dma_rcv prg pool");
3000 for (i=0; i<d->num_desc; i++) {
		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
						     d->buf_size,
						     d->buf_bus+i);
3004 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3006 if (d->buf_cpu[i] != NULL) {
			memset(d->buf_cpu[i], 0, d->buf_size);
		} else {
			PRINT(KERN_ERR,
			      "Failed to allocate dma buffer");
			free_dma_rcv_ctx(d);
			return -ENOMEM;
		}
3015 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3016 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3018 if (d->prg_cpu[i] != NULL) {
			memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
		} else {
			PRINT(KERN_ERR,
			      "Failed to allocate dma prg");
			free_dma_rcv_ctx(d);
			return -ENOMEM;
		}
	}
3028 spin_lock_init(&d->lock);
3030 if (type == DMA_CTX_ISO) {
3031 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3032 OHCI_ISO_MULTICHANNEL_RECEIVE,
3033 dma_rcv_tasklet, (unsigned long) d);
	} else {
		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3036 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3037 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
		tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
	}

	return 0;
}
3045 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3048 struct ti_ohci *ohci = d->ohci;
3053 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3056 for (i=0; i<d->num_desc; i++)
3057 if (d->prg_cpu[i] && d->prg_bus[i]) {
3058 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3059 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3061 pci_pool_destroy(d->prg_pool);
3062 OHCI_DMA_FREE("dma_trm prg pool");
	/* Mark this context as freed. */
	d->ohci = NULL;
}
3072 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3073 enum context_type type, int ctx, int num_desc,
	int i, len;
	static char pool_name[20];
	static int num_allocs = 0;
	d->ohci = ohci;
	d->type = type;
	d->ctx = ctx;

	d->num_desc = num_desc;
3088 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3089 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3091 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3092 PRINT(KERN_ERR, "Failed to allocate at dma prg");
		free_dma_trm_ctx(d);
		return -ENOMEM;
	}
3097 len = sprintf(pool_name, "ohci1394_trm_prg");
3098 sprintf(pool_name+len, "%d", num_allocs);
3099 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3100 sizeof(struct at_dma_prg), 4, 0);
3101 if (d->prg_pool == NULL) {
3102 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
		free_dma_trm_ctx(d);
		return -ENOMEM;
	}
	num_allocs++;
	OHCI_DMA_ALLOC("dma_trm prg pool");
3110 for (i = 0; i < d->num_desc; i++) {
3111 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3112 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3114 if (d->prg_cpu[i] != NULL) {
			memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
		} else {
			PRINT(KERN_ERR,
			      "Failed to allocate at dma prg");
			free_dma_trm_ctx(d);
			return -ENOMEM;
		}
	}
3124 spin_lock_init(&d->lock);
3126 /* initialize tasklet */
3127 if (type == DMA_CTX_ISO) {
3128 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3129 dma_trm_tasklet, (unsigned long) d);
3130 if (ohci1394_register_iso_tasklet(ohci,
3131 &ohci->it_legacy_tasklet) < 0) {
3132 PRINT(KERN_ERR, "No IT DMA context available");
			free_dma_trm_ctx(d);
			return -EBUSY;
		}
3137 /* IT can be assigned to any context by register_iso_tasklet */
3138 d->ctx = ohci->it_legacy_tasklet.context;
3139 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3140 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3141 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
	} else {
		d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3144 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3145 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
		tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
	}

	return 0;
}
3152 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3154 struct ti_ohci *ohci = host->hostdata;
3156 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3157 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3159 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3163 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3164 quadlet_t data, quadlet_t compare)
	struct ti_ohci *ohci = host->hostdata;
	int i;
3169 reg_write(ohci, OHCI1394_CSRData, data);
3170 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3171 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3173 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
			break;

		mdelay(1);
	}

	return reg_read(ohci, OHCI1394_CSRData);
}
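/* A hedged note on hw_csr_reg: the csrSel field written to CSRControl
 * (reg & 0x3 above) selects which serial-bus CSR the hardware compare-swap
 * acts on -- conventionally 0 = BUS_MANAGER_ID, 1 = BANDWIDTH_AVAILABLE,
 * 2/3 = CHANNELS_AVAILABLE hi/lo.  The ieee1394 core uses this hook to
 * perform atomic lock requests on those registers. */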
3183 static struct hpsb_host_driver ohci1394_driver = {
3184 .owner = THIS_MODULE,
3185 .name = OHCI1394_DRIVER_NAME,
3186 .set_hw_config_rom = ohci_set_hw_config_rom,
3187 .transmit_packet = ohci_transmit,
3188 .devctl = ohci_devctl,
3189 .isoctl = ohci_isoctl,
	.hw_csr_reg = ohci_hw_csr_reg,
};
3193 /***********************************
3194 * PCI Driver Interface functions *
3195 ***********************************/
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3204 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3205 const struct pci_device_id *ent)
3207 struct hpsb_host *host;
3208 struct ti_ohci *ohci; /* shortcut to currently handled device */
3209 unsigned long ohci_base;
3211 if (pci_enable_device(dev))
3212 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3213 pci_set_master(dev);
3215 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3216 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);
3225 /* We don't want hardware swapping */
3226 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3228 /* Some oddball Apple controllers do not order the selfid
3229 * properly, so we make up for it here. */
3230 #ifndef __LITTLE_ENDIAN
3231 /* XXX: Need a better way to check this. I'm wondering if we can
3232 * read the values of the OHCI1394_PCI_HCI_Control and the
3233 * noByteSwapData registers to see if they were not cleared to
3234 * zero. Should this work? Obviously it's not defined what these
3235 * registers will read when they aren't supported. Bleh! */
3236 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3237 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3238 ohci->no_swap_incoming = 1;
3239 ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif
3245 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a bus reset. */
3251 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3252 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3253 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3254 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3255 ohci->check_busreset = 1;
3257 /* We hardwire the MMIO length, since some CardBus adaptors
3258 * fail to report the right length. Anyway, the ohci spec
3259 * clearly says it's 2kb, so this shouldn't be a problem. */
3260 ohci_base = pci_resource_start(dev, 0);
3261 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3262 PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
3263 pci_resource_len(dev, 0));
3265 /* Seems PCMCIA handles this internally. Not sure why. Seems
3266 * pretty bogus to force a driver to special case this. */
3268 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3269 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3270 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3272 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3274 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3275 if (ohci->registers == NULL)
3276 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3277 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3278 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3280 /* csr_config rom allocation */
3281 ohci->csr_config_rom_cpu =
3282 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3283 &ohci->csr_config_rom_bus);
3284 OHCI_DMA_ALLOC("consistent csr_config_rom");
3285 if (ohci->csr_config_rom_cpu == NULL)
3286 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3287 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3289 /* self-id dma buffer allocation */
3290 ohci->selfid_buf_cpu =
3291 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3292 &ohci->selfid_buf_bus);
3293 OHCI_DMA_ALLOC("consistent selfid_buf");
3295 if (ohci->selfid_buf_cpu == NULL)
3296 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3297 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3299 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3300 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3301 "8Kb boundary... may cause problems on some CXD3222 chip",
3302 ohci->selfid_buf_cpu);
3304 /* No self-id errors at startup */
3305 ohci->self_id_errors = 0;
3307 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3308 /* AR DMA request context allocation */
3309 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3310 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3311 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3312 OHCI1394_AsReqRcvContextBase) < 0)
3313 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3315 /* AR DMA response context allocation */
3316 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3317 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3318 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3319 OHCI1394_AsRspRcvContextBase) < 0)
3320 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3322 /* AT DMA request context */
3323 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3324 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3325 OHCI1394_AsReqTrContextBase) < 0)
3326 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3328 /* AT DMA response context */
3329 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3330 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3331 OHCI1394_AsRspTrContextBase) < 0)
3332 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
3336 ohci_soft_reset(ohci);
3338 /* Now enable LPS, which we need in order to start accessing
3339 * most of the registers. In fact, on some cards (ALI M5251),
3340 * accessing registers in the SClk domain without LPS enabled
3341 * will lock up the machine. Wait 50msec to make sure we have
3342 * full link enabled. */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
	mdelay(50);
3345 /* Disable and clear interrupts */
3346 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3347 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3351 /* Determine the number of available IR and IT contexts. */
3352 ohci->nb_iso_rcv_ctx =
3353 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3354 ohci->nb_iso_xmit_ctx =
3355 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated. */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
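	/* With, say, 4 receive contexts this leaves ir_ctx_usage =
	 * ~0 << 4 = 0x...fffffff0: bits 0-3 (the real contexts) start out
	 * free, while the bits for contexts 4 and up read as permanently
	 * "in use" to the allocator in ohci1394_register_iso_tasklet(). */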
3362 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3363 spin_lock_init(&ohci->iso_tasklet_list_lock);
3364 ohci->ISO_channel_usage = 0;
3365 spin_lock_init(&ohci->IR_channel_lock);
3367 /* Allocate the IR DMA context right here so we don't have
3368 * to do it in interrupt path - note that this doesn't
3369 * waste much memory and avoids the jugglery required to
3370 * allocate it in IRQ path. */
3371 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3372 DMA_CTX_ISO, 0, IR_NUM_DESC,
3373 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3374 OHCI1394_IsoRcvContextBase) < 0) {
3375 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3378 /* We hopefully don't have to pre-allocate IT DMA like we did
3379 * for IR DMA above. Allocate it on-demand and mark inactive. */
3380 ohci->it_legacy_context.ohci = NULL;
3381 spin_lock_init(&ohci->event_lock);
	/*
	 * interrupts are disabled, all right, but... due to SA_SHIRQ we
	 * might get called anyway. We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be
	 * initialized by that point.
	 */
3389 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3390 OHCI1394_DRIVER_NAME, ohci))
3391 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3393 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3394 ohci_initialize(ohci);
3396 /* Set certain csr values */
3397 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3398 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3399 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3400 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3401 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3403 /* Tell the highlevel this host is ready */
3404 if (hpsb_add_host(host))
3405 FAIL(-ENOMEM, "Failed to register host with highlevel");
	ohci->init_state = OHCI_INIT_DONE;

	return 0;
}
3413 static void ohci1394_pci_remove(struct pci_dev *pdev)
	struct ti_ohci *ohci;
	struct device *dev;
3418 ohci = pci_get_drvdata(pdev);
3422 dev = get_device(&ohci->host->device);
3424 switch (ohci->init_state) {
3425 case OHCI_INIT_DONE:
3426 hpsb_remove_host(ohci->host);
3428 /* Clear out BUS Options */
3429 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3430 reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
3433 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3435 case OHCI_INIT_HAVE_IRQ:
3436 /* Clear interrupt registers */
3437 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3438 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3439 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3440 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3441 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3442 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3444 /* Disable IRM Contender */
3445 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3447 /* Clear link control register */
3448 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3450 /* Let all other nodes know to ignore us */
3451 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3453 /* Soft reset before we start - this disables
3454 * interrupts and clears linkEnable and LPS. */
3455 ohci_soft_reset(ohci);
3456 free_irq(ohci->dev->irq, ohci);
3458 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * don't need to stop them here. */
		/* Free AR dma */
3462 free_dma_rcv_ctx(&ohci->ar_req_context);
3463 free_dma_rcv_ctx(&ohci->ar_resp_context);
		/* Free AT dma */
		free_dma_trm_ctx(&ohci->at_req_context);
3467 free_dma_trm_ctx(&ohci->at_resp_context);
		/* Free IR dma */
		free_dma_rcv_ctx(&ohci->ir_legacy_context);

		/* Free IT dma */
		free_dma_trm_ctx(&ohci->it_legacy_context);
3479 case OHCI_INIT_HAVE_SELFID_BUFFER:
3480 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3481 ohci->selfid_buf_cpu,
3482 ohci->selfid_buf_bus);
3483 OHCI_DMA_FREE("consistent selfid_buf");
3485 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3486 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3487 ohci->csr_config_rom_cpu,
3488 ohci->csr_config_rom_bus);
3489 OHCI_DMA_FREE("consistent csr_config_rom");
3491 case OHCI_INIT_HAVE_IOMAPPING:
3492 iounmap(ohci->registers);
3494 case OHCI_INIT_HAVE_MEM_REGION:
3496 release_mem_region(pci_resource_start(ohci->dev, 0),
3497 OHCI1394_REGISTER_SIZE);
3500 #ifdef CONFIG_PPC_PMAC
3501 /* On UniNorth, power down the cable and turn off the chip
3502 * clock when the module is removed to save power on
3503 * laptops. Turning it back ON is done by the arch code when
3504 * pci_enable_device() is called */
		if (machine_is(powermac)) {
			struct device_node *of_node;

			of_node = pci_device_to_OF_node(ohci->dev);
			if (of_node) {
				pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
				pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
			}
		}
3514 #endif /* CONFIG_PPC_PMAC */
3516 case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3525 static int ohci1394_pci_resume (struct pci_dev *pdev)
3527 #ifdef CONFIG_PPC_PMAC
3528 if (machine_is(powermac)) {
3529 struct device_node *of_node;
3531 /* Re-enable 1394 */
3532 of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
3536 #endif /* CONFIG_PPC_PMAC */
	pci_enable_device(pdev);

	return 0;
}
3544 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3546 #ifdef CONFIG_PPC_PMAC
3547 if (machine_is(powermac)) {
3548 struct device_node *of_node;
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif /* CONFIG_PPC_PMAC */

	return 0;
}
3561 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class = PCI_CLASS_FIREWIRE_OHCI,
3566 .class_mask = PCI_ANY_ID,
3567 .vendor = PCI_ANY_ID,
3568 .device = PCI_ANY_ID,
3569 .subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ 0, },
};
3575 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3577 static struct pci_driver ohci1394_pci_driver = {
3578 .name = OHCI1394_DRIVER_NAME,
3579 .id_table = ohci1394_pci_tbl,
3580 .probe = ohci1394_pci_probe,
3581 .remove = ohci1394_pci_remove,
3582 .resume = ohci1394_pci_resume,
	.suspend = ohci1394_pci_suspend,
};
3586 /***********************************
3587 * OHCI1394 Video Interface *
3588 ***********************************/
3590 /* essentially the only purpose of this code is to allow another
3591 module to hook into ohci's interrupt handler */
int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
{
	int i = 0;
3597 /* stop the channel program if it's still running */
3598 reg_write(ohci, reg, 0x8000);
3600 /* Wait until it effectively stops */
	while (reg_read(ohci, reg) & 0x400) {
		i++;
		if (i > 5000) {
			PRINT(KERN_ERR,
			      "Runaway loop while stopping context: %s...", msg ? msg : "");
			return 1;
		}

		mb();
		udelay(10);
	}

	if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);

	return 0;
}
3616 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3617 void (*func)(unsigned long), unsigned long data)
3619 tasklet_init(&tasklet->tasklet, func, data);
3620 tasklet->type = type;
3621 /* We init the tasklet->link field, so we can list_del() it
3622 * without worrying whether it was added to the list or not. */
3623 INIT_LIST_HEAD(&tasklet->link);
3626 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3627 struct ohci1394_iso_tasklet *tasklet)
3629 unsigned long flags, *usage;
3630 int n, i, r = -EBUSY;
	if (tasklet->type == OHCI_ISO_TRANSMIT) {
		n = ohci->nb_iso_xmit_ctx;
		usage = &ohci->it_ctx_usage;
	} else {
		n = ohci->nb_iso_rcv_ctx;
		usage = &ohci->ir_ctx_usage;

		/* only one receive context can be multichannel (OHCI sec 10.4.1) */
		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
				return r;
			}
		}
	}

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
	for (i = 0; i < n; i++)
		if (!test_and_set_bit(i, usage)) {
			tasklet->context = i;
			list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
			r = 0;
			break;
		}

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);

	return r;
}
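/*
 * Hedged usage sketch (not from this file): a client module such as
 * video1394 hooks the interrupt path roughly like this -- "my_bh" and
 * "priv" are hypothetical names:
 *
 *	struct ohci1394_iso_tasklet t;
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_RECEIVE, my_bh,
 *				  (unsigned long) priv);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
 *		return -EBUSY;
 *	// t.context is now the DMA context number to program
 *	...
 *	ohci1394_unregister_iso_tasklet(ohci, &t);
 */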
3663 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3664 struct ohci1394_iso_tasklet *tasklet)
3666 unsigned long flags;
3668 tasklet_kill(&tasklet->tasklet);
3670 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
	if (tasklet->type == OHCI_ISO_TRANSMIT)
		clear_bit(tasklet->context, &ohci->it_ctx_usage);
	else {
		clear_bit(tasklet->context, &ohci->ir_ctx_usage);

		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			clear_bit(0, &ohci->ir_multichannel_used);
		}
	}

	list_del(&tasklet->link);
	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
3687 EXPORT_SYMBOL(ohci1394_stop_context);
3688 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3689 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3690 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3692 /***********************************
3693 * General module initialization *
3694 ***********************************/
3696 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3697 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3698 MODULE_LICENSE("GPL");
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}

static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3710 module_init(ohci1394_init);
3711 module_exit(ohci1394_cleanup);