2 * Driver for OHCI 1394 controllers
4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 #include <linux/bug.h>
22 #include <linux/compiler.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/firewire.h>
27 #include <linux/firewire-constants.h>
28 #include <linux/gfp.h>
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
32 #include <linux/kernel.h>
33 #include <linux/list.h>
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/mutex.h>
38 #include <linux/pci.h>
39 #include <linux/pci_ids.h>
40 #include <linux/spinlock.h>
41 #include <linux/string.h>
43 #include <asm/byteorder.h>
45 #include <asm/system.h>
47 #ifdef CONFIG_PPC_PMAC
48 #include <asm/pmac_feature.h>
54 #define DESCRIPTOR_OUTPUT_MORE 0
55 #define DESCRIPTOR_OUTPUT_LAST (1 << 12)
56 #define DESCRIPTOR_INPUT_MORE (2 << 12)
57 #define DESCRIPTOR_INPUT_LAST (3 << 12)
58 #define DESCRIPTOR_STATUS (1 << 11)
59 #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
60 #define DESCRIPTOR_PING (1 << 7)
61 #define DESCRIPTOR_YY (1 << 6)
62 #define DESCRIPTOR_NO_IRQ (0 << 4)
63 #define DESCRIPTOR_IRQ_ERROR (1 << 4)
64 #define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
65 #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
66 #define DESCRIPTOR_WAIT (3 << 0)
72 __le32 branch_address;
74 __le16 transfer_status;
75 } __attribute__((aligned(16)));
77 #define CONTROL_SET(regs) (regs)
78 #define CONTROL_CLEAR(regs) ((regs) + 4)
79 #define COMMAND_PTR(regs) ((regs) + 12)
80 #define CONTEXT_MATCH(regs) ((regs) + 16)
83 struct descriptor descriptor;
84 struct ar_buffer *next;
90 struct ar_buffer *current_buffer;
91 struct ar_buffer *last_buffer;
94 struct tasklet_struct tasklet;
99 typedef int (*descriptor_callback_t)(struct context *ctx,
100 struct descriptor *d,
101 struct descriptor *last);
104 * A buffer that contains a block of DMA-able coherent memory used for
105 * storing a portion of a DMA descriptor program.
107 struct descriptor_buffer {
108 struct list_head list;
109 dma_addr_t buffer_bus;
112 struct descriptor buffer[0];
116 struct fw_ohci *ohci;
118 int total_allocation;
121 * List of page-sized buffers for storing DMA descriptors.
122 * Head of list contains buffers in use and tail of list contains
125 struct list_head buffer_list;
128 * Pointer to a buffer inside buffer_list that contains the tail
129 * end of the current DMA program.
131 struct descriptor_buffer *buffer_tail;
134 * The descriptor containing the branch address of the first
135 * descriptor that has not yet been filled by the device.
137 struct descriptor *last;
140 * The last descriptor in the DMA program. It contains the branch
141 * address that must be updated upon appending a new descriptor.
143 struct descriptor *prev;
145 descriptor_callback_t callback;
147 struct tasklet_struct tasklet;
150 #define IT_HEADER_SY(v) ((v) << 0)
151 #define IT_HEADER_TCODE(v) ((v) << 4)
152 #define IT_HEADER_CHANNEL(v) ((v) << 8)
153 #define IT_HEADER_TAG(v) ((v) << 14)
154 #define IT_HEADER_SPEED(v) ((v) << 16)
155 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
158 struct fw_iso_context base;
159 struct context context;
162 size_t header_length;
165 #define CONFIG_ROM_SIZE 1024
170 __iomem char *registers;
173 int request_generation; /* for timestamping incoming requests */
175 unsigned int pri_req_max;
178 bool csr_state_setclear_abdicate;
181 * Spinlock for accessing fw_ohci data. Never call out of
182 * this driver with this lock held.
186 struct mutex phy_reg_mutex;
188 struct ar_context ar_request_ctx;
189 struct ar_context ar_response_ctx;
190 struct context at_request_ctx;
191 struct context at_response_ctx;
193 u32 it_context_mask; /* unoccupied IT contexts */
194 struct iso_context *it_context_list;
195 u64 ir_context_channels; /* unoccupied channels */
196 u32 ir_context_mask; /* unoccupied IR contexts */
197 struct iso_context *ir_context_list;
198 u64 mc_channels; /* channels in use by the multichannel IR context */
202 dma_addr_t config_rom_bus;
203 __be32 *next_config_rom;
204 dma_addr_t next_config_rom_bus;
208 dma_addr_t self_id_bus;
209 struct tasklet_struct bus_reset_tasklet;
211 u32 self_id_buffer[512];
214 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
216 return container_of(card, struct fw_ohci, card);
219 #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
220 #define IR_CONTEXT_BUFFER_FILL 0x80000000
221 #define IR_CONTEXT_ISOCH_HEADER 0x40000000
222 #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
223 #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
224 #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
226 #define CONTEXT_RUN 0x8000
227 #define CONTEXT_WAKE 0x1000
228 #define CONTEXT_DEAD 0x0800
229 #define CONTEXT_ACTIVE 0x0400
231 #define OHCI1394_MAX_AT_REQ_RETRIES 0xf
232 #define OHCI1394_MAX_AT_RESP_RETRIES 0x2
233 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
235 #define OHCI1394_REGISTER_SIZE 0x800
236 #define OHCI_LOOP_COUNT 500
237 #define OHCI1394_PCI_HCI_Control 0x40
238 #define SELF_ID_BUF_SIZE 0x800
239 #define OHCI_TCODE_PHY_PACKET 0x0e
240 #define OHCI_VERSION_1_1 0x010010
242 static char ohci_driver_name[] = KBUILD_MODNAME;
244 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
245 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
247 #define QUIRK_CYCLE_TIMER 1
248 #define QUIRK_RESET_PACKET 2
249 #define QUIRK_BE_HEADERS 4
250 #define QUIRK_NO_1394A 8
251 #define QUIRK_NO_MSI 16
253 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
254 static const struct {
255 unsigned short vendor, device, flags;
257 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
260 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
261 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
262 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
263 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
264 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
265 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
268 /* This overrides anything that was found in ohci_quirks[]. */
269 static int param_quirks;
270 module_param_named(quirks, param_quirks, int, 0644);
271 MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
272 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
273 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
274 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
275 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
276 ", disable MSI = " __stringify(QUIRK_NO_MSI)
279 #define OHCI_PARAM_DEBUG_AT_AR 1
280 #define OHCI_PARAM_DEBUG_SELFIDS 2
281 #define OHCI_PARAM_DEBUG_IRQS 4
282 #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
284 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
286 static int param_debug;
287 module_param_named(debug, param_debug, int, 0644);
288 MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
289 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
290 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
291 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
292 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
293 ", or a combination, or all = -1)");
295 static void log_irqs(u32 evt)
297 if (likely(!(param_debug &
298 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
301 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
302 !(evt & OHCI1394_busReset))
305 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
306 evt & OHCI1394_selfIDComplete ? " selfID" : "",
307 evt & OHCI1394_RQPkt ? " AR_req" : "",
308 evt & OHCI1394_RSPkt ? " AR_resp" : "",
309 evt & OHCI1394_reqTxComplete ? " AT_req" : "",
310 evt & OHCI1394_respTxComplete ? " AT_resp" : "",
311 evt & OHCI1394_isochRx ? " IR" : "",
312 evt & OHCI1394_isochTx ? " IT" : "",
313 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
314 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
315 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
316 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
317 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
318 evt & OHCI1394_busReset ? " busReset" : "",
319 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
320 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
321 OHCI1394_respTxComplete | OHCI1394_isochRx |
322 OHCI1394_isochTx | OHCI1394_postedWriteErr |
323 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
324 OHCI1394_cycleInconsistent |
325 OHCI1394_regAccessFail | OHCI1394_busReset)
329 static const char *speed[] = {
330 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
332 static const char *power[] = {
333 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
334 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
336 static const char port[] = { '.', '-', 'p', 'c', };
338 static char _p(u32 *s, int shift)
340 return port[*s >> shift & 3];
343 static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
345 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
348 fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
349 self_id_count, generation, node_id);
351 for (; self_id_count--; ++s)
352 if ((*s & 1 << 23) == 0)
353 fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
354 "%s gc=%d %s %s%s%s\n",
355 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
356 speed[*s >> 14 & 3], *s >> 16 & 63,
357 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
358 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
360 fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
362 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
363 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
366 static const char *evts[] = {
367 [0x00] = "evt_no_status", [0x01] = "-reserved-",
368 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
369 [0x04] = "evt_underrun", [0x05] = "evt_overrun",
370 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
371 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
372 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
373 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
374 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
375 [0x10] = "-reserved-", [0x11] = "ack_complete",
376 [0x12] = "ack_pending ", [0x13] = "-reserved-",
377 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
378 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
379 [0x18] = "-reserved-", [0x19] = "-reserved-",
380 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
381 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
382 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
383 [0x20] = "pending/cancelled",
385 static const char *tcodes[] = {
386 [0x0] = "QW req", [0x1] = "BW req",
387 [0x2] = "W resp", [0x3] = "-reserved-",
388 [0x4] = "QR req", [0x5] = "BR req",
389 [0x6] = "QR resp", [0x7] = "BR resp",
390 [0x8] = "cycle start", [0x9] = "Lk req",
391 [0xa] = "async stream packet", [0xb] = "Lk resp",
392 [0xc] = "-reserved-", [0xd] = "-reserved-",
393 [0xe] = "link internal", [0xf] = "-reserved-",
395 static const char *phys[] = {
396 [0x0] = "phy config packet", [0x1] = "link-on packet",
397 [0x2] = "self-id packet", [0x3] = "-reserved-",
400 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
402 int tcode = header[0] >> 4 & 0xf;
405 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
408 if (unlikely(evt >= ARRAY_SIZE(evts)))
411 if (evt == OHCI1394_evt_bus_reset) {
412 fw_notify("A%c evt_bus_reset, generation %d\n",
413 dir, (header[2] >> 16) & 0xff);
417 if (header[0] == ~header[1]) {
418 fw_notify("A%c %s, %s, %08x\n",
419 dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
424 case 0x0: case 0x6: case 0x8:
425 snprintf(specific, sizeof(specific), " = %08x",
426 be32_to_cpu((__force __be32)header[3]));
428 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
429 snprintf(specific, sizeof(specific), " %x,%x",
430 header[3] >> 16, header[3] & 0xffff);
438 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
440 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
441 fw_notify("A%c spd %x tl %02x, "
444 dir, speed, header[0] >> 10 & 0x3f,
445 header[1] >> 16, header[0] >> 16, evts[evt],
446 tcodes[tcode], header[1] & 0xffff, header[2], specific);
449 fw_notify("A%c spd %x tl %02x, "
452 dir, speed, header[0] >> 10 & 0x3f,
453 header[1] >> 16, header[0] >> 16, evts[evt],
454 tcodes[tcode], specific);
460 #define param_debug 0
461 static inline void log_irqs(u32 evt) {}
462 static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
463 static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
465 #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
467 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
469 writel(data, ohci->registers + offset);
472 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
474 return readl(ohci->registers + offset);
477 static inline void flush_writes(const struct fw_ohci *ohci)
479 /* Do a dummy read to flush writes. */
480 reg_read(ohci, OHCI1394_Version);
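/*
 * Read a PHY register through the link's PhyControl register: post the
 * read request, then poll until PhyControl.ReadDone is set.  As the
 * comment in the loop notes, the first few iterations poll without
 * sleeping; sleeping is only needed when the link/PHY interface is busy.
 */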
483 static int read_phy_reg(struct fw_ohci *ohci, int addr)
488 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
489 for (i = 0; i < 3 + 100; i++) {
490 val = reg_read(ohci, OHCI1394_PhyControl);
491 if (val & OHCI1394_PhyControl_ReadDone)
492 return OHCI1394_PhyControl_ReadData(val);
495 * Try a few times without waiting. Sleeping is necessary
496 * only when the link/PHY interface is busy.
501 fw_error("failed to read phy reg\n");
506 static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
510 reg_write(ohci, OHCI1394_PhyControl,
511 OHCI1394_PhyControl_Write(addr, val));
512 for (i = 0; i < 3 + 100; i++) {
513 val = reg_read(ohci, OHCI1394_PhyControl);
514 if (!(val & OHCI1394_PhyControl_WritePending))
520 fw_error("failed to write phy reg\n");
525 static int update_phy_reg(struct fw_ohci *ohci, int addr,
526 int clear_bits, int set_bits)
528 int ret = read_phy_reg(ohci, addr);
533 * The interrupt status bits are cleared by writing a one bit.
534 * Avoid clearing them unless explicitly requested in set_bits.
537 clear_bits |= PHY_INT_STATUS_BITS;
539 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
542 static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
546 ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
550 return read_phy_reg(ohci, addr);
553 static int ohci_read_phy_reg(struct fw_card *card, int addr)
555 struct fw_ohci *ohci = fw_ohci(card);
558 mutex_lock(&ohci->phy_reg_mutex);
559 ret = read_phy_reg(ohci, addr);
560 mutex_unlock(&ohci->phy_reg_mutex);
565 static int ohci_update_phy_reg(struct fw_card *card, int addr,
566 int clear_bits, int set_bits)
568 struct fw_ohci *ohci = fw_ohci(card);
571 mutex_lock(&ohci->phy_reg_mutex);
572 ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
573 mutex_unlock(&ohci->phy_reg_mutex);
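/*
 * Append one more page-sized buffer to an asynchronous receive (AR)
 * context: allocate a coherent page, initialize the INPUT_MORE
 * descriptor embedded at its start, link it in behind the current last
 * buffer via that buffer's branch_address, and wake the context so the
 * controller picks up the new buffer.
 */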
578 static int ar_context_add_page(struct ar_context *ctx)
580 struct device *dev = ctx->ohci->card.device;
581 struct ar_buffer *ab;
582 dma_addr_t uninitialized_var(ab_bus);
585 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
590 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
591 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
593 DESCRIPTOR_BRANCH_ALWAYS);
594 offset = offsetof(struct ar_buffer, data);
595 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
596 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
597 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
598 ab->descriptor.branch_address = 0;
600 wmb(); /* finish init of new descriptors before branch_address update */
601 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
602 ctx->last_buffer->next = ab;
603 ctx->last_buffer = ab;
605 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
606 flush_writes(ctx->ohci);
611 static void ar_context_release(struct ar_context *ctx)
613 struct ar_buffer *ab, *ab_next;
617 for (ab = ctx->current_buffer; ab; ab = ab_next) {
619 offset = offsetof(struct ar_buffer, data);
620 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
621 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
626 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
627 #define cond_le32_to_cpu(v) \
628 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
630 #define cond_le32_to_cpu(v) le32_to_cpu(v)
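/*
 * Parse one packet out of an AR buffer.  The header is converted from
 * the bus-endian layout (see QUIRK_BE_HEADERS), the header and payload
 * lengths are derived from the tcode, and the trailing status word
 * supplies the ack/event code, speed and timestamp.  The packet is then
 * handed to the core as a request or response, and a pointer just past
 * the packet is returned.
 */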
633 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
635 struct fw_ohci *ohci = ctx->ohci;
637 u32 status, length, tcode;
640 p.header[0] = cond_le32_to_cpu(buffer[0]);
641 p.header[1] = cond_le32_to_cpu(buffer[1]);
642 p.header[2] = cond_le32_to_cpu(buffer[2]);
644 tcode = (p.header[0] >> 4) & 0x0f;
646 case TCODE_WRITE_QUADLET_REQUEST:
647 case TCODE_READ_QUADLET_RESPONSE:
648 p.header[3] = (__force __u32) buffer[3];
649 p.header_length = 16;
650 p.payload_length = 0;
653 case TCODE_READ_BLOCK_REQUEST :
654 p.header[3] = cond_le32_to_cpu(buffer[3]);
655 p.header_length = 16;
656 p.payload_length = 0;
659 case TCODE_WRITE_BLOCK_REQUEST:
660 case TCODE_READ_BLOCK_RESPONSE:
661 case TCODE_LOCK_REQUEST:
662 case TCODE_LOCK_RESPONSE:
663 p.header[3] = cond_le32_to_cpu(buffer[3]);
664 p.header_length = 16;
665 p.payload_length = p.header[3] >> 16;
668 case TCODE_WRITE_RESPONSE:
669 case TCODE_READ_QUADLET_REQUEST:
670 case OHCI_TCODE_PHY_PACKET:
671 p.header_length = 12;
672 p.payload_length = 0;
676 /* FIXME: Stop context, discard everything, and restart? */
678 p.payload_length = 0;
681 p.payload = (void *) buffer + p.header_length;
683 /* FIXME: What to do about evt_* errors? */
684 length = (p.header_length + p.payload_length + 3) / 4;
685 status = cond_le32_to_cpu(buffer[length]);
686 evt = (status >> 16) & 0x1f;
689 p.speed = (status >> 21) & 0x7;
690 p.timestamp = status & 0xffff;
691 p.generation = ohci->request_generation;
693 log_ar_at_event('R', p.speed, p.header, evt);
696 * The OHCI bus reset handler synthesizes a phy packet with
697 * the new generation number when a bus reset happens (see
698 * section 8.4.2.3). This helps us determine when a request
699 * was received and make sure we send the response in the same
700 * generation. We only need this for requests; for responses
701 * we use the unique tlabel for finding the matching request.
704 * Alas some chips sometimes emit bus reset packets with a
705 * wrong generation. We set the correct generation for these
706 * at a slightly incorrect time (in bus_reset_tasklet).
708 if (evt == OHCI1394_evt_bus_reset) {
709 if (!(ohci->quirks & QUIRK_RESET_PACKET))
710 ohci->request_generation = (p.header[2] >> 16) & 0xff;
711 } else if (ctx == &ohci->ar_request_ctx) {
712 fw_core_handle_request(&ohci->card, &p);
714 fw_core_handle_response(&ohci->card, &p);
717 return buffer + length + 1;
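/*
 * AR context tasklet: hand every complete packet that the controller
 * has written so far to handle_ar_packet().  When the current
 * descriptor is completely filled (res_count == 0), a packet may be
 * split across this page and the next one; the finished page is then
 * reused to reassemble the split packet, processed, freed, and a fresh
 * page is appended to the context.
 */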
720 static void ar_context_tasklet(unsigned long data)
722 struct ar_context *ctx = (struct ar_context *)data;
723 struct fw_ohci *ohci = ctx->ohci;
724 struct ar_buffer *ab;
725 struct descriptor *d;
728 ab = ctx->current_buffer;
731 if (d->res_count == 0) {
732 size_t size, rest, offset;
733 dma_addr_t start_bus;
737 * This descriptor is finished and we may have a
738 * packet split across this and the next buffer. We
739 * reuse the page for reassembling the split packet.
742 offset = offsetof(struct ar_buffer, data);
744 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
748 size = buffer + PAGE_SIZE - ctx->pointer;
749 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
750 memmove(buffer, ctx->pointer, size);
751 memcpy(buffer + size, ab->data, rest);
752 ctx->current_buffer = ab;
753 ctx->pointer = (void *) ab->data + rest;
754 end = buffer + size + rest;
757 buffer = handle_ar_packet(ctx, buffer);
759 dma_free_coherent(ohci->card.device, PAGE_SIZE,
761 ar_context_add_page(ctx);
763 buffer = ctx->pointer;
765 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
768 buffer = handle_ar_packet(ctx, buffer);
772 static int ar_context_init(struct ar_context *ctx,
773 struct fw_ohci *ohci, u32 regs)
779 ctx->last_buffer = &ab;
780 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
782 ar_context_add_page(ctx);
783 ar_context_add_page(ctx);
784 ctx->current_buffer = ab.next;
785 ctx->pointer = ctx->current_buffer->data;
790 static void ar_context_run(struct ar_context *ctx)
792 struct ar_buffer *ab = ctx->current_buffer;
796 offset = offsetof(struct ar_buffer, data);
797 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
799 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
800 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
801 flush_writes(ctx->ohci);
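/*
 * Figure out which descriptor of a z-descriptor block holds the branch
 * address: for a two-descriptor block whose first descriptor either
 * branches always or carries immediate data, it is the first
 * descriptor; otherwise it is the last descriptor of the block.
 */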
804 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
808 b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
809 key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
811 /* figure out which descriptor the branch address goes in */
812 if (z == 2 && (b == 3 || key == 2))
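/*
 * Tasklet shared by the AT and isochronous contexts: follow the branch
 * addresses through the DMA program and invoke ctx->callback for each
 * completed descriptor block, stopping when the callback reports an
 * incomplete block.  Descriptor buffers that have been fully consumed
 * are moved back to the free tail of ctx->buffer_list.
 */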
818 static void context_tasklet(unsigned long data)
820 struct context *ctx = (struct context *) data;
821 struct descriptor *d, *last;
824 struct descriptor_buffer *desc;
826 desc = list_entry(ctx->buffer_list.next,
827 struct descriptor_buffer, list);
829 while (last->branch_address != 0) {
830 struct descriptor_buffer *old_desc = desc;
831 address = le32_to_cpu(last->branch_address);
835 /* If the branch address points to a buffer outside of the
836 * current buffer, advance to the next buffer. */
837 if (address < desc->buffer_bus ||
838 address >= desc->buffer_bus + desc->used)
839 desc = list_entry(desc->list.next,
840 struct descriptor_buffer, list);
841 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
842 last = find_branch_descriptor(d, z);
844 if (!ctx->callback(ctx, d, last))
847 if (old_desc != desc) {
848 /* If we've advanced to the next buffer, move the
849 * previous buffer to the free list. */
852 spin_lock_irqsave(&ctx->ohci->lock, flags);
853 list_move_tail(&old_desc->list, &ctx->buffer_list);
854 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
861 * Allocate a new buffer and add it to the list of free buffers for this
862 * context. Must be called with ohci->lock held.
864 static int context_add_buffer(struct context *ctx)
866 struct descriptor_buffer *desc;
867 dma_addr_t uninitialized_var(bus_addr);
871 * 16MB of descriptors should be far more than enough for any DMA
872 * program. This will catch runaway userspace or DoS attacks.
874 if (ctx->total_allocation >= 16*1024*1024)
877 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
878 &bus_addr, GFP_ATOMIC);
882 offset = (void *)&desc->buffer - (void *)desc;
883 desc->buffer_size = PAGE_SIZE - offset;
884 desc->buffer_bus = bus_addr + offset;
887 list_add_tail(&desc->list, &ctx->buffer_list);
888 ctx->total_allocation += PAGE_SIZE;
893 static int context_init(struct context *ctx, struct fw_ohci *ohci,
894 u32 regs, descriptor_callback_t callback)
898 ctx->total_allocation = 0;
900 INIT_LIST_HEAD(&ctx->buffer_list);
901 if (context_add_buffer(ctx) < 0)
904 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
905 struct descriptor_buffer, list);
907 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
908 ctx->callback = callback;
911 * We put a dummy descriptor in the buffer that has a NULL
912 * branch address and looks like it's been sent. That way we
913 * have a descriptor to append DMA programs to.
915 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
916 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
917 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
918 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
919 ctx->last = ctx->buffer_tail->buffer;
920 ctx->prev = ctx->buffer_tail->buffer;
925 static void context_release(struct context *ctx)
927 struct fw_card *card = &ctx->ohci->card;
928 struct descriptor_buffer *desc, *tmp;
930 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
931 dma_free_coherent(card->device, PAGE_SIZE, desc,
933 ((void *)&desc->buffer - (void *)desc));
936 /* Must be called with ohci->lock held */
937 static struct descriptor *context_get_descriptors(struct context *ctx,
938 int z, dma_addr_t *d_bus)
940 struct descriptor *d = NULL;
941 struct descriptor_buffer *desc = ctx->buffer_tail;
943 if (z * sizeof(*d) > desc->buffer_size)
946 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
947 /* No room for the descriptor in this buffer, so advance to the
950 if (desc->list.next == &ctx->buffer_list) {
951 /* If there is no free buffer next in the list,
953 if (context_add_buffer(ctx) < 0)
956 desc = list_entry(desc->list.next,
957 struct descriptor_buffer, list);
958 ctx->buffer_tail = desc;
961 d = desc->buffer + desc->used / sizeof(*d);
962 memset(d, 0, z * sizeof(*d));
963 *d_bus = desc->buffer_bus + desc->used;
968 static void context_run(struct context *ctx, u32 extra)
970 struct fw_ohci *ohci = ctx->ohci;
972 reg_write(ohci, COMMAND_PTR(ctx->regs),
973 le32_to_cpu(ctx->last->branch_address));
974 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
975 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
979 static void context_append(struct context *ctx,
980 struct descriptor *d, int z, int extra)
983 struct descriptor_buffer *desc = ctx->buffer_tail;
985 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
987 desc->used += (z + extra) * sizeof(*d);
989 wmb(); /* finish init of new descriptors before branch_address update */
990 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
991 ctx->prev = find_branch_descriptor(d, z);
993 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
994 flush_writes(ctx->ohci);
997 static void context_stop(struct context *ctx)
1002 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1003 flush_writes(ctx->ohci);
1005 for (i = 0; i < 10; i++) {
1006 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1007 if ((reg & CONTEXT_ACTIVE) == 0)
1012 fw_error("Error: DMA context still active (0x%08x)\n", reg);
1015 struct driver_data {
1016 struct fw_packet *packet;
1020 * This function appends a packet to the DMA queue for transmission.
1021 * Must always be called with the ohci->lock held to ensure proper
1022 * generation handling and locking around packet queue manipulation.
1024 static int at_context_queue_packet(struct context *ctx,
1025 struct fw_packet *packet)
1027 struct fw_ohci *ohci = ctx->ohci;
1028 dma_addr_t d_bus, uninitialized_var(payload_bus);
1029 struct driver_data *driver_data;
1030 struct descriptor *d, *last;
1035 d = context_get_descriptors(ctx, 4, &d_bus);
1037 packet->ack = RCODE_SEND_ERROR;
1041 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1042 d[0].res_count = cpu_to_le16(packet->timestamp);
1045 * The DMA format for asynchronous link packets is different
1046 * from the IEEE1394 layout, so shift the fields around
1047 * accordingly. If header_length is 8, it's a PHY packet, to
1048 * which we need to prepend an extra quadlet.
1051 header = (__le32 *) &d[1];
1052 switch (packet->header_length) {
1055 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1056 (packet->speed << 16));
1057 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
1058 (packet->header[0] & 0xffff0000));
1059 header[2] = cpu_to_le32(packet->header[2]);
1061 tcode = (packet->header[0] >> 4) & 0x0f;
1062 if (TCODE_IS_BLOCK_PACKET(tcode))
1063 header[3] = cpu_to_le32(packet->header[3]);
1065 header[3] = (__force __le32) packet->header[3];
1067 d[0].req_count = cpu_to_le16(packet->header_length);
1071 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
1072 (packet->speed << 16));
1073 header[1] = cpu_to_le32(packet->header[0]);
1074 header[2] = cpu_to_le32(packet->header[1]);
1075 d[0].req_count = cpu_to_le16(12);
1077 if (is_ping_packet(packet->header))
1078 d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
1082 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1083 (packet->speed << 16));
1084 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
1085 d[0].req_count = cpu_to_le16(8);
1090 packet->ack = RCODE_SEND_ERROR;
1094 driver_data = (struct driver_data *) &d[3];
1095 driver_data->packet = packet;
1096 packet->driver_data = driver_data;
1098 if (packet->payload_length > 0) {
1100 dma_map_single(ohci->card.device, packet->payload,
1101 packet->payload_length, DMA_TO_DEVICE);
1102 if (dma_mapping_error(ohci->card.device, payload_bus)) {
1103 packet->ack = RCODE_SEND_ERROR;
1106 packet->payload_bus = payload_bus;
1107 packet->payload_mapped = true;
1109 d[2].req_count = cpu_to_le16(packet->payload_length);
1110 d[2].data_address = cpu_to_le32(payload_bus);
1118 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1119 DESCRIPTOR_IRQ_ALWAYS |
1120 DESCRIPTOR_BRANCH_ALWAYS);
1123 * If the controller and packet generations don't match, we need to
1124 * bail out and try again. If IntEvent.busReset is set, the AT context
1125 * is halted, so appending to the context and trying to run it is
1126 * futile. Most controllers do the right thing and just flush the AT
1127 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
1128 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
1129 * up stalling out. So we just bail out in software and try again
1130 * later, and everyone is happy.
1131 * FIXME: Document how the locking works.
1133 if (ohci->generation != packet->generation ||
1134 reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
1135 if (packet->payload_mapped)
1136 dma_unmap_single(ohci->card.device, payload_bus,
1137 packet->payload_length, DMA_TO_DEVICE);
1138 packet->ack = RCODE_GENERATION;
1142 context_append(ctx, d, z, 4 - z);
1144 /* If the context isn't already running, start it up. */
1145 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1146 if ((reg & CONTEXT_RUN) == 0)
1147 context_run(ctx, 0);
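/*
 * Completion callback for the AT contexts: once the last descriptor of
 * a block carries a transfer status, unmap the payload, translate the
 * OHCI event/ack code into the packet's ack or rcode, and complete the
 * packet via packet->callback().
 */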
1152 static int handle_at_packet(struct context *context,
1153 struct descriptor *d,
1154 struct descriptor *last)
1156 struct driver_data *driver_data;
1157 struct fw_packet *packet;
1158 struct fw_ohci *ohci = context->ohci;
1161 if (last->transfer_status == 0)
1162 /* This descriptor isn't done yet, stop iteration. */
1165 driver_data = (struct driver_data *) &d[3];
1166 packet = driver_data->packet;
1168 /* This packet was cancelled, just continue. */
1171 if (packet->payload_mapped)
1172 dma_unmap_single(ohci->card.device, packet->payload_bus,
1173 packet->payload_length, DMA_TO_DEVICE);
1175 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1176 packet->timestamp = le16_to_cpu(last->res_count);
1178 log_ar_at_event('T', packet->speed, packet->header, evt);
1181 case OHCI1394_evt_timeout:
1182 /* Async response transmit timed out. */
1183 packet->ack = RCODE_CANCELLED;
1186 case OHCI1394_evt_flushed:
1188 * The packet was flushed; this should give the same error as
1189 * when we try to use a stale generation count.
1191 packet->ack = RCODE_GENERATION;
1194 case OHCI1394_evt_missing_ack:
1196 * Using a valid (current) generation count, but the
1197 * node is not on the bus or not sending acks.
1199 packet->ack = RCODE_NO_ACK;
1202 case ACK_COMPLETE + 0x10:
1203 case ACK_PENDING + 0x10:
1204 case ACK_BUSY_X + 0x10:
1205 case ACK_BUSY_A + 0x10:
1206 case ACK_BUSY_B + 0x10:
1207 case ACK_DATA_ERROR + 0x10:
1208 case ACK_TYPE_ERROR + 0x10:
1209 packet->ack = evt - 0x10;
1213 packet->ack = RCODE_SEND_ERROR;
1217 packet->callback(packet, &ohci->card, packet->ack);
1222 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
1223 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
1224 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
1225 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1226 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1228 static void handle_local_rom(struct fw_ohci *ohci,
1229 struct fw_packet *packet, u32 csr)
1231 struct fw_packet response;
1232 int tcode, length, i;
1234 tcode = HEADER_GET_TCODE(packet->header[0]);
1235 if (TCODE_IS_BLOCK_PACKET(tcode))
1236 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1240 i = csr - CSR_CONFIG_ROM;
1241 if (i + length > CONFIG_ROM_SIZE) {
1242 fw_fill_response(&response, packet->header,
1243 RCODE_ADDRESS_ERROR, NULL, 0);
1244 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1245 fw_fill_response(&response, packet->header,
1246 RCODE_TYPE_ERROR, NULL, 0);
1248 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1249 (void *) ohci->config_rom + i, length);
1252 fw_core_handle_response(&ohci->card, &response);
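/*
 * Handle a lock request addressed to one of the local serial-bus CSRs
 * (BUS_MANAGER_ID, BANDWIDTH_AVAILABLE, CHANNELS_AVAILABLE).  The
 * compare-and-swap itself is carried out by the controller: data and
 * compare values are written to CSRData/CSRCompareData, the register is
 * selected via CSRControl, and the old value is read back and returned
 * in a locally generated response.
 */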
1255 static void handle_local_lock(struct fw_ohci *ohci,
1256 struct fw_packet *packet, u32 csr)
1258 struct fw_packet response;
1259 int tcode, length, ext_tcode, sel;
1260 __be32 *payload, lock_old;
1261 u32 lock_arg, lock_data;
1263 tcode = HEADER_GET_TCODE(packet->header[0]);
1264 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1265 payload = packet->payload;
1266 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1268 if (tcode == TCODE_LOCK_REQUEST &&
1269 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1270 lock_arg = be32_to_cpu(payload[0]);
1271 lock_data = be32_to_cpu(payload[1]);
1272 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1276 fw_fill_response(&response, packet->header,
1277 RCODE_TYPE_ERROR, NULL, 0);
1281 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1282 reg_write(ohci, OHCI1394_CSRData, lock_data);
1283 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1284 reg_write(ohci, OHCI1394_CSRControl, sel);
1286 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
1287 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
1289 fw_notify("swap not done yet\n");
1291 fw_fill_response(&response, packet->header,
1292 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1294 fw_core_handle_response(&ohci->card, &response);
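/*
 * Requests and responses addressed to the local node never go out on
 * the wire; fake the ack_pending/ack_complete callback that the AT
 * context would normally deliver, serve config ROM reads and CSR lock
 * requests directly, and pass everything else to the core handlers.
 */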
1297 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1302 if (ctx == &ctx->ohci->at_request_ctx) {
1303 packet->ack = ACK_PENDING;
1304 packet->callback(packet, &ctx->ohci->card, packet->ack);
1308 ((unsigned long long)
1309 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1311 csr = offset - CSR_REGISTER_BASE;
1313 /* Handle config rom reads. */
1314 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1315 handle_local_rom(ctx->ohci, packet, csr);
1317 case CSR_BUS_MANAGER_ID:
1318 case CSR_BANDWIDTH_AVAILABLE:
1319 case CSR_CHANNELS_AVAILABLE_HI:
1320 case CSR_CHANNELS_AVAILABLE_LO:
1321 handle_local_lock(ctx->ohci, packet, csr);
1324 if (ctx == &ctx->ohci->at_request_ctx)
1325 fw_core_handle_request(&ctx->ohci->card, packet);
1327 fw_core_handle_response(&ctx->ohci->card, packet);
1331 if (ctx == &ctx->ohci->at_response_ctx) {
1332 packet->ack = ACK_COMPLETE;
1333 packet->callback(packet, &ctx->ohci->card, packet->ack);
1337 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1339 unsigned long flags;
1342 spin_lock_irqsave(&ctx->ohci->lock, flags);
1344 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1345 ctx->ohci->generation == packet->generation) {
1346 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1347 handle_local_request(ctx, packet);
1351 ret = at_context_queue_packet(ctx, packet);
1352 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1355 packet->callback(packet, &ctx->ohci->card, packet->ack);
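/*
 * Convert an IsochronousCycleTimer value into a linear count of
 * 24.576 MHz ticks: cycleOffset is in bits 0-11, cycleCount (125 us
 * cycles of 3072 ticks each) in bits 12-24, and cycleSeconds (8000
 * cycles each) in bits 25-31.
 */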
1359 static u32 cycle_timer_ticks(u32 cycle_timer)
1363 ticks = cycle_timer & 0xfff;
1364 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1365 ticks += (3072 * 8000) * (cycle_timer >> 25);
1371 * Some controllers exhibit one or more of the following bugs when updating the
1372 * iso cycle timer register:
1373 * - When the lowest six bits are wrapping around to zero, a read that happens
1374 * at the same time will return garbage in the lowest ten bits.
1375 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1376 * not incremented for about 60 ns.
1377 * - Occasionally, the entire register reads zero.
1379 * To catch these, we read the register three times and ensure that the
1380 * two differences between consecutive reads are approximately equal, i.e.
1381 * neither is more than twice the other. Furthermore, any negative difference indicates an
1382 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1383 * execute, so we have enough precision to compute the ratio of the differences.)
1385 static u32 get_cycle_time(struct fw_ohci *ohci)
1392 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1394 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1397 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1401 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1402 t0 = cycle_timer_ticks(c0);
1403 t1 = cycle_timer_ticks(c1);
1404 t2 = cycle_timer_ticks(c2);
1407 } while ((diff01 <= 0 || diff12 <= 0 ||
1408 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1416 * This function has to be called at least every 64 seconds. The bus_time
1417 * field stores not only the upper 25 bits of the BUS_TIME register but also
1418 * the most significant bit of the cycle timer in bit 6 so that we can detect
1419 * changes in this bit.
1421 static u32 update_bus_time(struct fw_ohci *ohci)
1423 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1425 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1426 ohci->bus_time += 0x40;
1428 return ohci->bus_time | cycle_time_seconds;
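/*
 * Bottom half of the selfIDComplete interrupt: validate the new node
 * ID, copy the self IDs out of the DMA buffer (re-checking the
 * generation afterwards, see the comment below), stop the AT contexts
 * for the new generation, swap in a previously prepared config ROM if
 * there is one, and report the new topology to the core.
 */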
1431 static void bus_reset_tasklet(unsigned long data)
1433 struct fw_ohci *ohci = (struct fw_ohci *)data;
1434 int self_id_count, i, j, reg;
1435 int generation, new_generation;
1436 unsigned long flags;
1437 void *free_rom = NULL;
1438 dma_addr_t free_rom_bus = 0;
1441 reg = reg_read(ohci, OHCI1394_NodeID);
1442 if (!(reg & OHCI1394_NodeID_idValid)) {
1443 fw_notify("node ID not valid, new bus reset in progress\n");
1446 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1447 fw_notify("malconfigured bus\n");
1450 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1451 OHCI1394_NodeID_nodeNumber);
1453 is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1454 if (!(ohci->is_root && is_new_root))
1455 reg_write(ohci, OHCI1394_LinkControlSet,
1456 OHCI1394_LinkControl_cycleMaster);
1457 ohci->is_root = is_new_root;
1459 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1460 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1461 fw_notify("inconsistent self IDs\n");
1465 * The count in the SelfIDCount register is the number of
1466 * bytes in the self ID receive buffer. Since we also receive
1467 * the inverted quadlets and a header quadlet, we shift one
1468 * bit extra to get the actual number of self IDs.
1470 self_id_count = (reg >> 3) & 0xff;
1471 if (self_id_count == 0 || self_id_count > 252) {
1472 fw_notify("inconsistent self IDs\n");
1475 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1478 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1479 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1480 fw_notify("inconsistent self IDs\n");
1483 ohci->self_id_buffer[j] =
1484 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1489 * Check the consistency of the self IDs we just read. The
1490 * problem we face is that a new bus reset can start while we
1491 * read out the self IDs from the DMA buffer. If this happens,
1492 * the DMA buffer will be overwritten with new self IDs and we
1493 * will read out inconsistent data. The OHCI specification
1494 * (section 11.2) recommends a technique similar to
1495 * linux/seqlock.h, where we remember the generation of the
1496 * self IDs in the buffer before reading them out and compare
1497 * it to the current generation after reading them out. If
1498 * the two generations match we know we have a consistent set
1502 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1503 if (new_generation != generation) {
1504 fw_notify("recursive bus reset detected, "
1505 "discarding self ids\n");
1509 /* FIXME: Document how the locking works. */
1510 spin_lock_irqsave(&ohci->lock, flags);
1512 ohci->generation = generation;
1513 context_stop(&ohci->at_request_ctx);
1514 context_stop(&ohci->at_response_ctx);
1515 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1517 if (ohci->quirks & QUIRK_RESET_PACKET)
1518 ohci->request_generation = generation;
1521 * This next bit is unrelated to the AT context stuff but we
1522 * have to do it under the spinlock also. If a new config rom
1523 * was set up before this reset, the old one is now no longer
1524 * in use and we can free it. Update the config rom pointers
1525 * to point to the current config rom and clear the
1526 * next_config_rom pointer so a new update can take place.
1529 if (ohci->next_config_rom != NULL) {
1530 if (ohci->next_config_rom != ohci->config_rom) {
1531 free_rom = ohci->config_rom;
1532 free_rom_bus = ohci->config_rom_bus;
1534 ohci->config_rom = ohci->next_config_rom;
1535 ohci->config_rom_bus = ohci->next_config_rom_bus;
1536 ohci->next_config_rom = NULL;
1539 * Restore config_rom image and manually update
1540 * config_rom registers. Writing the header quadlet
1541 * will indicate that the config rom is ready, so we
1544 reg_write(ohci, OHCI1394_BusOptions,
1545 be32_to_cpu(ohci->config_rom[2]));
1546 ohci->config_rom[0] = ohci->next_header;
1547 reg_write(ohci, OHCI1394_ConfigROMhdr,
1548 be32_to_cpu(ohci->next_header));
1551 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1552 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1553 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1556 spin_unlock_irqrestore(&ohci->lock, flags);
1559 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1560 free_rom, free_rom_bus);
1562 log_selfids(ohci->node_id, generation,
1563 self_id_count, ohci->self_id_buffer);
1565 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1566 self_id_count, ohci->self_id_buffer,
1567 ohci->csr_state_setclear_abdicate);
1568 ohci->csr_state_setclear_abdicate = false;
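/*
 * Top half: acknowledge all pending events except busReset (which must
 * stay asserted until bus_reset_tasklet has run, see below), schedule
 * the AR/AT/iso context tasklets and the bus reset tasklet, and handle
 * the miscellaneous error and cycle timer events inline.
 */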
1571 static irqreturn_t irq_handler(int irq, void *data)
1573 struct fw_ohci *ohci = data;
1574 u32 event, iso_event;
1577 event = reg_read(ohci, OHCI1394_IntEventClear);
1579 if (!event || !~event)
1582 /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
1583 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
1586 if (event & OHCI1394_selfIDComplete)
1587 tasklet_schedule(&ohci->bus_reset_tasklet);
1589 if (event & OHCI1394_RQPkt)
1590 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1592 if (event & OHCI1394_RSPkt)
1593 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1595 if (event & OHCI1394_reqTxComplete)
1596 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1598 if (event & OHCI1394_respTxComplete)
1599 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1601 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1602 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1605 i = ffs(iso_event) - 1;
1606 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1607 iso_event &= ~(1 << i);
1610 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1611 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1614 i = ffs(iso_event) - 1;
1615 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1616 iso_event &= ~(1 << i);
1619 if (unlikely(event & OHCI1394_regAccessFail))
1620 fw_error("Register access failure - "
1621 "please notify linux1394-devel@lists.sf.net\n");
1623 if (unlikely(event & OHCI1394_postedWriteErr))
1624 fw_error("PCI posted write error\n");
1626 if (unlikely(event & OHCI1394_cycleTooLong)) {
1627 if (printk_ratelimit())
1628 fw_notify("isochronous cycle too long\n");
1629 reg_write(ohci, OHCI1394_LinkControlSet,
1630 OHCI1394_LinkControl_cycleMaster);
1633 if (unlikely(event & OHCI1394_cycleInconsistent)) {
1635 * We need to clear this event bit in order to make
1636 * cycleMatch isochronous I/O work. In theory we should
1637 * stop active cycleMatch iso contexts now and restart
1638 * them at least two cycles later. (FIXME?)
1640 if (printk_ratelimit())
1641 fw_notify("isochronous cycle inconsistent\n");
1644 if (event & OHCI1394_cycle64Seconds) {
1645 spin_lock(&ohci->lock);
1646 update_bus_time(ohci);
1647 spin_unlock(&ohci->lock);
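/*
 * Set HCControl.softReset and poll up to OHCI_LOOP_COUNT times, waiting
 * for the controller to clear the bit again.
 */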
1653 static int software_reset(struct fw_ohci *ohci)
1657 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1659 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1660 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1661 OHCI1394_HCControl_softReset) == 0)
1669 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1671 size_t size = length * 4;
1673 memcpy(dest, src, size);
1674 if (size < CONFIG_ROM_SIZE)
1675 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1678 static int configure_1394a_enhancements(struct fw_ohci *ohci)
1681 int ret, clear, set, offset;
1683 /* Check if the driver should configure link and PHY. */
1684 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
1685 OHCI1394_HCControl_programPhyEnable))
1688 /* Paranoia: check whether the PHY supports 1394a, too. */
1689 enable_1394a = false;
1690 ret = read_phy_reg(ohci, 2);
1693 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
1694 ret = read_paged_phy_reg(ohci, 1, 8);
1698 enable_1394a = true;
1701 if (ohci->quirks & QUIRK_NO_1394A)
1702 enable_1394a = false;
1704 /* Configure PHY and link consistently. */
1707 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1709 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1712 ret = update_phy_reg(ohci, 5, clear, set);
1717 offset = OHCI1394_HCControlSet;
1719 offset = OHCI1394_HCControlClear;
1720 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
1722 /* Clean up: configuration has been taken care of. */
1723 reg_write(ohci, OHCI1394_HCControlClear,
1724 OHCI1394_HCControl_programPhyEnable);
1729 static int ohci_enable(struct fw_card *card,
1730 const __be32 *config_rom, size_t length)
1732 struct fw_ohci *ohci = fw_ohci(card);
1733 struct pci_dev *dev = to_pci_dev(card->device);
1734 u32 lps, seconds, version, irqs;
1737 if (software_reset(ohci)) {
1738 fw_error("Failed to reset ohci card.\n");
1743 * Now enable LPS, which we need in order to start accessing
1744 * most of the registers. In fact, on some cards (ALI M5251),
1745 * accessing registers in the SClk domain without LPS enabled
1746 * will lock up the machine. Wait 50msec to make sure we have
1747 * full link enabled. However, with some cards (well, at least
1748 * a JMicron PCIe card), we have to try again sometimes.
1750 reg_write(ohci, OHCI1394_HCControlSet,
1751 OHCI1394_HCControl_LPS |
1752 OHCI1394_HCControl_postedWriteEnable);
1755 for (lps = 0, i = 0; !lps && i < 3; i++) {
1757 lps = reg_read(ohci, OHCI1394_HCControlSet) &
1758 OHCI1394_HCControl_LPS;
1762 fw_error("Failed to set Link Power Status\n");
1766 reg_write(ohci, OHCI1394_HCControlClear,
1767 OHCI1394_HCControl_noByteSwapData);
1769 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1770 reg_write(ohci, OHCI1394_LinkControlSet,
1771 OHCI1394_LinkControl_rcvSelfID |
1772 OHCI1394_LinkControl_rcvPhyPkt |
1773 OHCI1394_LinkControl_cycleTimerEnable |
1774 OHCI1394_LinkControl_cycleMaster);
1776 reg_write(ohci, OHCI1394_ATRetries,
1777 OHCI1394_MAX_AT_REQ_RETRIES |
1778 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1779 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
1782 seconds = lower_32_bits(get_seconds());
1783 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
1784 ohci->bus_time = seconds & ~0x3f;
1786 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
1787 if (version >= OHCI_VERSION_1_1) {
1788 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
1790 card->broadcast_channel_auto_allocated = true;
1793 /* Get implemented bits of the priority arbitration request counter. */
1794 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
1795 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
1796 reg_write(ohci, OHCI1394_FairnessControl, 0);
1797 card->priority_budget_implemented = ohci->pri_req_max != 0;
1799 ar_context_run(&ohci->ar_request_ctx);
1800 ar_context_run(&ohci->ar_response_ctx);
1802 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1803 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1804 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1806 ret = configure_1394a_enhancements(ohci);
1810 /* Activate link_on bit and contender bit in our self ID packets.*/
1811 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
1816 * When the link is not yet enabled, the atomic config rom
1817 * update mechanism described below in ohci_set_config_rom()
1818 * is not active. We have to update ConfigRomHeader and
1819 * BusOptions manually, and the write to ConfigROMmap takes
1820 * effect immediately. We tie this to the enabling of the
1821 * link, so we have a valid config rom before enabling - the
1822 * OHCI requires that ConfigROMhdr and BusOptions have valid
1823 * values before enabling.
1825 * However, when the ConfigROMmap is written, some controllers
1826 * always read back quadlets 0 and 2 from the config rom to
1827 * the ConfigRomHeader and BusOptions registers on bus reset.
1828 * They shouldn't do that in this initial case where the link
1829 * isn't enabled. This means we have to use the same
1830 * workaround here, setting the bus header to 0 and then writing
1831 * the right values in the bus reset tasklet.
1835 ohci->next_config_rom =
1836 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1837 &ohci->next_config_rom_bus,
1839 if (ohci->next_config_rom == NULL)
1842 copy_config_rom(ohci->next_config_rom, config_rom, length);
1845 * In the suspend case, config_rom is NULL, which
1846 * means that we just reuse the old config rom.
1848 ohci->next_config_rom = ohci->config_rom;
1849 ohci->next_config_rom_bus = ohci->config_rom_bus;
1852 ohci->next_header = ohci->next_config_rom[0];
1853 ohci->next_config_rom[0] = 0;
1854 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1855 reg_write(ohci, OHCI1394_BusOptions,
1856 be32_to_cpu(ohci->next_config_rom[2]));
1857 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1859 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1861 if (!(ohci->quirks & QUIRK_NO_MSI))
1862 pci_enable_msi(dev);
1863 if (request_irq(dev->irq, irq_handler,
1864 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
1865 ohci_driver_name, ohci)) {
1866 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
1867 pci_disable_msi(dev);
1868 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1869 ohci->config_rom, ohci->config_rom_bus);
1873 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1874 OHCI1394_RQPkt | OHCI1394_RSPkt |
1875 OHCI1394_isochTx | OHCI1394_isochRx |
1876 OHCI1394_postedWriteErr |
1877 OHCI1394_selfIDComplete |
1878 OHCI1394_regAccessFail |
1879 OHCI1394_cycle64Seconds |
1880 OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
1881 OHCI1394_masterIntEnable;
1882 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1883 irqs |= OHCI1394_busReset;
1884 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
1886 reg_write(ohci, OHCI1394_HCControlSet,
1887 OHCI1394_HCControl_linkEnable |
1888 OHCI1394_HCControl_BIBimageValid);
1891 /* We are ready to go, reset bus to finish initialization. */
1892 fw_schedule_bus_reset(&ohci->card, false, true);
1897 static int ohci_set_config_rom(struct fw_card *card,
1898 const __be32 *config_rom, size_t length)
1900 struct fw_ohci *ohci;
1901 unsigned long flags;
1903 __be32 *next_config_rom;
1904 dma_addr_t uninitialized_var(next_config_rom_bus);
1906 ohci = fw_ohci(card);
1909 * When the OHCI controller is enabled, the config rom update
1910 * mechanism is a bit tricky, but easy enough to use. See
1911 * section 5.5.6 in the OHCI specification.
1913 * The OHCI controller caches the new config rom address in a
1914 * shadow register (ConfigROMmapNext) and needs a bus reset
1915 * for the changes to take place. When the bus reset is
1916 * detected, the controller loads the new values for the
1917 * ConfigRomHeader and BusOptions registers from the specified
1918 * config rom and loads ConfigROMmap from the ConfigROMmapNext
1919 * shadow register. All automatically and atomically.
1921 * Now, there's a twist to this story. The automatic load of
1922 * ConfigRomHeader and BusOptions doesn't honor the
1923 * noByteSwapData bit, so with a be32 config rom, the
1924 * controller will load be32 values in to these registers
1925 * during the atomic update, even on little endian
1926 * architectures. The workaround we use is to put a 0 in the
1927 * header quadlet; 0 is endian agnostic and means that the
1928 * config rom isn't ready yet. In the bus reset tasklet we
1929 * then set up the real values for the two registers.
1931 * We use ohci->lock to avoid racing with the code that sets
1932 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1936 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1937 &next_config_rom_bus, GFP_KERNEL);
1938 if (next_config_rom == NULL)
1941 spin_lock_irqsave(&ohci->lock, flags);
1943 if (ohci->next_config_rom == NULL) {
1944 ohci->next_config_rom = next_config_rom;
1945 ohci->next_config_rom_bus = next_config_rom_bus;
1947 copy_config_rom(ohci->next_config_rom, config_rom, length);
1949 ohci->next_header = config_rom[0];
1950 ohci->next_config_rom[0] = 0;
1952 reg_write(ohci, OHCI1394_ConfigROMmap,
1953 ohci->next_config_rom_bus);
1957 spin_unlock_irqrestore(&ohci->lock, flags);
1960 * Now initiate a bus reset to have the changes take
1961 * effect. We clean up the old config rom memory and DMA
1962 * mappings in the bus reset tasklet, since the OHCI
1963 * controller could need to access it before the bus reset
1967 fw_schedule_bus_reset(&ohci->card, true, true);
1969 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1970 next_config_rom, next_config_rom_bus);
1975 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1977 struct fw_ohci *ohci = fw_ohci(card);
1979 at_context_transmit(&ohci->at_request_ctx, packet);
1982 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1984 struct fw_ohci *ohci = fw_ohci(card);
1986 at_context_transmit(&ohci->at_response_ctx, packet);
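/*
 * Try to withdraw a queued AT request packet before the controller has
 * transmitted it.  The AT tasklet is disabled while we look at the
 * packet's driver_data; if the packet has not been acked yet, its
 * payload is unmapped and it is completed with RCODE_CANCELLED.
 */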
1989 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1991 struct fw_ohci *ohci = fw_ohci(card);
1992 struct context *ctx = &ohci->at_request_ctx;
1993 struct driver_data *driver_data = packet->driver_data;
1996 tasklet_disable(&ctx->tasklet);
1998 if (packet->ack != 0)
2001 if (packet->payload_mapped)
2002 dma_unmap_single(ohci->card.device, packet->payload_bus,
2003 packet->payload_length, DMA_TO_DEVICE);
2005 log_ar_at_event('T', packet->speed, packet->header, 0x20);
2006 driver_data->packet = NULL;
2007 packet->ack = RCODE_CANCELLED;
2008 packet->callback(packet, &ohci->card, packet->ack);
2011 tasklet_enable(&ctx->tasklet);
2016 static int ohci_enable_phys_dma(struct fw_card *card,
2017 int node_id, int generation)
2019 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
2022 struct fw_ohci *ohci = fw_ohci(card);
2023 unsigned long flags;
2027 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2028 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2031 spin_lock_irqsave(&ohci->lock, flags);
2033 if (ohci->generation != generation) {
2039 * Note, if the node ID contains a non-local bus ID, physical DMA is
2040 * enabled for _all_ nodes on remote buses.
2043 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2045 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2047 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2051 spin_unlock_irqrestore(&ohci->lock, flags);
2054 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
2057 static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2059 struct fw_ohci *ohci = fw_ohci(card);
2060 unsigned long flags;
2063 switch (csr_offset) {
2064 case CSR_STATE_CLEAR:
2066 if (ohci->is_root &&
2067 (reg_read(ohci, OHCI1394_LinkControlSet) &
2068 OHCI1394_LinkControl_cycleMaster))
2069 value = CSR_STATE_BIT_CMSTR;
2072 if (ohci->csr_state_setclear_abdicate)
2073 value |= CSR_STATE_BIT_ABDICATE;
2078 return reg_read(ohci, OHCI1394_NodeID) << 16;
2080 case CSR_CYCLE_TIME:
2081 return get_cycle_time(ohci);
2085 * We might be called just after the cycle timer has wrapped
2086 * around but just before the cycle64Seconds handler, so we
2087 * had better check here, too, whether the bus time needs to be updated.
2089 spin_lock_irqsave(&ohci->lock, flags);
2090 value = update_bus_time(ohci);
2091 spin_unlock_irqrestore(&ohci->lock, flags);
2094 case CSR_BUSY_TIMEOUT:
2095 value = reg_read(ohci, OHCI1394_ATRetries);
2096 return (value >> 4) & 0x0ffff00f;
2098 case CSR_PRIORITY_BUDGET:
2099 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2100 (ohci->pri_req_max << 8);
static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = false;
		break;

	case CSR_STATE_SET:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = true;
		break;

	case CSR_NODE_IDS:
		reg_write(ohci, OHCI1394_NodeID, value >> 16);
		flush_writes(ohci);
		break;

	case CSR_CYCLE_TIME:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
		reg_write(ohci, OHCI1394_IntEventSet,
			  OHCI1394_cycleInconsistent);
		flush_writes(ohci);
		break;

	case CSR_BUS_TIME:
		spin_lock_irqsave(&ohci->lock, flags);
		ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
		spin_unlock_irqrestore(&ohci->lock, flags);
		break;

	case CSR_BUSY_TIMEOUT:
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
		reg_write(ohci, OHCI1394_ATRetries, value);
		flush_writes(ohci);
		break;

	case CSR_PRIORITY_BUDGET:
		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
		flush_writes(ohci);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

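/*
 * Per-packet isochronous headers are accumulated in ctx->header until the
 * next client callback.  That buffer is a single page; stop copying rather
 * than overflow it.
 */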
static void copy_iso_headers(struct iso_context *ctx, void *p)
{
	int i = ctx->header_length;

	if (i + ctx->base.header_size > PAGE_SIZE)
		return;

	/*
	 * The iso header is byteswapped to little endian by
	 * the controller, but the remaining header quadlets
	 * are big endian.  We want to present all the headers
	 * as big endian, so we have to swap the first quadlet.
	 */
	if (ctx->base.header_size > 0)
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
	if (ctx->base.header_size > 4)
		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
	if (ctx->base.header_size > 8)
		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}

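/*
 * Completion handler for packet-per-buffer IR contexts.  The controller
 * stores the isochronous header right behind the descriptor block (that is
 * where queue_iso_packet_per_buffer points the header descriptor), which is
 * why the header is read from last + 1.
 */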
static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	p = last + 1;
	copy_iso_headers(ctx, p);

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback.sc(&ctx->base,
				      le32_to_cpu(ir_header[0]) & 0xffff,
				      ctx->header_length, ctx->header,
				      ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (!last->transfer_status)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback.mc(&ctx->base,
				      le32_to_cpu(last->data_address) +
				      le16_to_cpu(last->req_count) -
				      le16_to_cpu(last->res_count),
				      ctx->base.callback_data);

	return 1;
}

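/*
 * IT completion: record each finished packet's transfer status and
 * timestamp as one big-endian quadlet so that the header layout matches
 * what the receive path hands to clients.
 */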
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	int i;
	struct descriptor *pd;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	i = ctx->header_length;
	if (i + 4 < PAGE_SIZE) {
		/* Present this value as big-endian to match the receive code */
		*(__be32 *)(ctx->header + i) = cpu_to_be32(
				((u32)le16_to_cpu(pd->transfer_status) << 16) |
				le16_to_cpu(pd->res_count));
		ctx->header_length += 4;
	}
	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
				      ctx->header_length, ctx->header,
				      ctx->base.callback_data);
		ctx->header_length = 0;
	}
	return 1;
}

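/*
 * Program the IR multichannel mask registers: clear the bits of channels
 * we are not listening to, set the bits of those we are, and remember the
 * resulting mask in ohci->mc_channels.
 */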
static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);

	ohci->mc_channels = channels;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *uninitialized_var(ctx);
	descriptor_callback_t uninitialized_var(callback);
	u64 *uninitialized_var(channels);
	u32 *uninitialized_var(mask), uninitialized_var(regs);
	unsigned long flags;
	int index, ret = -EBUSY;

	spin_lock_irqsave(&ohci->lock, flags);

	switch (type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		mask     = &ohci->it_context_mask;
		callback = handle_it_packet;
		index    = ffs(*mask) - 1;
		if (index >= 0) {
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoXmitContextBase(index);
			ctx  = &ohci->it_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		channels = &ohci->ir_context_channels;
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_packet_per_buffer;
		index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			*channels &= ~(1ULL << channel);
			*mask     &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_buffer_fill;
		index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			ohci->mc_allocated = true;
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	default:
		index = -1;
		ret = -ENOSYS;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(ret);

	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		set_multichannel_mask(ohci, 0);

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);

	switch (type) {
	case FW_ISO_CONTEXT_RECEIVE:
		*channels |= 1ULL << channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ohci->mc_allocated = false;
		break;
	}
	*mask |= 1 << index;

	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(ret);
}

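/*
 * A negative cycle argument means "start as soon as possible"; otherwise
 * the requested cycle is written into the context match register together
 * with the cycle-match-enable bit.
 */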
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
		/* fall through */
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
		break;
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	unsigned long flags;
	int ret;

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:

		spin_lock_irqsave(&ohci->lock, flags);

		/* Don't allow multichannel to grab other contexts' channels. */
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			ret = -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			ret = 0;
		}

		spin_unlock_irqrestore(&ohci->lock, flags);

		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

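/*
 * Build the descriptor block for one IT packet: a key-immediate descriptor
 * carrying the two-quadlet isochronous packet header (omitted for skip
 * packets), optionally a descriptor for caller-supplied header data, and
 * one descriptor per payload page touched by the packet.
 */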
static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME:  Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page               = payload_index >> PAGE_SHIFT;
		offset             = payload_index & ~PAGE_MASK;
		next_page_index    = (page + 1) << PAGE_SHIFT;
		length             =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count    = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

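/*
 * Packet-per-buffer receive: for each expected packet, queue a header
 * descriptor that catches the isochronous header (stored right behind the
 * descriptor block) followed by input descriptors that spread the payload
 * across the client's buffer pages.
 */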
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size  = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
					      DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count    = cpu_to_le16(header_size);
		d->res_count    = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

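/*
 * Buffer-fill (multichannel) receive: packets are packed back to back into
 * the client buffer, so one input descriptor per page is enough and the
 * controller simply branches from one page to the next.
 */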
static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page   = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest   = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version, link_enh;
	u64 guid;
	int i, err, n_ir, n_it;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	pmac_ohci_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if (ohci_quirks[i].vendor == dev->vendor &&
		    (ohci_quirks[i].device == dev->device ||
		     ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	/* TI OHCI-Lynx and compatible: set recommended configuration bits. */
	if (dev->vendor == PCI_VENDOR_ID_TI) {
		pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);

		/* adjust latency of ATx FIFO: use 1.7 KB threshold */
		link_enh &= ~TI_LinkEnh_atx_thresh_mask;
		link_enh |= TI_LinkEnh_atx_thresh_1_7K;

		/* use priority arbitration for asynchronous responses */
		link_enh |= TI_LinkEnh_enab_unfair;

		/* required for aPhyEnhanceEnable to work */
		link_enh |= TI_LinkEnh_enab_accel;

		pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
	}

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * n_ir;
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * n_it;
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
		  "%d IR + %d IT contexts, quirks 0x%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff,
		  n_ir, n_it, ohci->quirks);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(&ohci->card);
	pmac_ohci_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_disable_msi(dev);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(&ohci->card);
	pmac_ohci_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	pci_disable_msi(dev);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);