2 * Driver for OHCI 1394 controllers
4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 #include <linux/compiler.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/firewire.h>
26 #include <linux/firewire-constants.h>
27 #include <linux/gfp.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
31 #include <linux/kernel.h>
32 #include <linux/list.h>
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/pci.h>
37 #include <linux/pci_ids.h>
38 #include <linux/spinlock.h>
39 #include <linux/string.h>
41 #include <asm/byteorder.h>
43 #include <asm/system.h>
45 #ifdef CONFIG_PPC_PMAC
46 #include <asm/pmac_feature.h>
52 #define DESCRIPTOR_OUTPUT_MORE 0
53 #define DESCRIPTOR_OUTPUT_LAST (1 << 12)
54 #define DESCRIPTOR_INPUT_MORE (2 << 12)
55 #define DESCRIPTOR_INPUT_LAST (3 << 12)
56 #define DESCRIPTOR_STATUS (1 << 11)
57 #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
58 #define DESCRIPTOR_PING (1 << 7)
59 #define DESCRIPTOR_YY (1 << 6)
60 #define DESCRIPTOR_NO_IRQ (0 << 4)
61 #define DESCRIPTOR_IRQ_ERROR (1 << 4)
62 #define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
63 #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
64 #define DESCRIPTOR_WAIT (3 << 0)
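/*
 * These flags form the 16-bit control word of a DMA descriptor: command
 * type (OUTPUT/INPUT, MORE/LAST), status writeback, interrupt policy and
 * branch/wait control.  For example, the AR receive buffers set up in
 * ar_context_add_page() below combine DESCRIPTOR_INPUT_MORE with
 * DESCRIPTOR_BRANCH_ALWAYS so the controller keeps filling the buffer and
 * then follows the branch address to the next one.
 */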
70 __le32 branch_address;
72 __le16 transfer_status;
73 } __attribute__((aligned(16)));
75 #define CONTROL_SET(regs) (regs)
76 #define CONTROL_CLEAR(regs) ((regs) + 4)
77 #define COMMAND_PTR(regs) ((regs) + 12)
78 #define CONTEXT_MATCH(regs) ((regs) + 16)
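/*
 * Byte offsets of the per-context registers, relative to the base of a DMA
 * context's register block:  the set and clear views of ContextControl,
 * the CommandPtr register, and (for isochronous receive contexts only)
 * ContextMatch.
 */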
81 struct descriptor descriptor;
82 struct ar_buffer *next;
88 struct ar_buffer *current_buffer;
89 struct ar_buffer *last_buffer;
92 struct tasklet_struct tasklet;
97 typedef int (*descriptor_callback_t)(struct context *ctx,
99 struct descriptor *last);
102 * A buffer that contains a block of DMA-able coherent memory used for
103 * storing a portion of a DMA descriptor program.
105 struct descriptor_buffer {
106 struct list_head list;
107 dma_addr_t buffer_bus;
110 struct descriptor buffer[0];
114 struct fw_ohci *ohci;
116 int total_allocation;
119 * List of page-sized buffers for storing DMA descriptors.
120 * Head of list contains buffers in use and tail of list contains
123 struct list_head buffer_list;
126 * Pointer to a buffer inside buffer_list that contains the tail
127 * end of the current DMA program.
129 struct descriptor_buffer *buffer_tail;
132 * The descriptor containing the branch address of the first
133 * descriptor that has not yet been filled by the device.
135 struct descriptor *last;
138 * The last descriptor in the DMA program. It contains the branch
139 * address that must be updated upon appending a new descriptor.
141 struct descriptor *prev;
143 descriptor_callback_t callback;
145 struct tasklet_struct tasklet;
148 #define IT_HEADER_SY(v) ((v) << 0)
149 #define IT_HEADER_TCODE(v) ((v) << 4)
150 #define IT_HEADER_CHANNEL(v) ((v) << 8)
151 #define IT_HEADER_TAG(v) ((v) << 14)
152 #define IT_HEADER_SPEED(v) ((v) << 16)
153 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
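/*
 * The IT_HEADER_* macros build the two header quadlets that an isochronous
 * transmit program stores through its KEY_IMMEDIATE descriptor.  The first
 * quadlet carries sy, tcode, channel, tag and speed; the second carries the
 * data length (which is why IT_HEADER_SPEED and IT_HEADER_DATA_LENGTH can
 * share the same shift).  See ohci_queue_iso_transmit() below, e.g.:
 *
 *	header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
 *				IT_HEADER_TAG(p->tag) |
 *				IT_HEADER_TCODE(TCODE_STREAM_DATA) |
 *				IT_HEADER_CHANNEL(ctx->base.channel) |
 *				IT_HEADER_SPEED(ctx->base.speed));
 *	header[1] = cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
 *						      p->payload_length));
 */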
156 struct fw_iso_context base;
157 struct context context;
160 size_t header_length;
163 #define CONFIG_ROM_SIZE 1024
168 __iomem char *registers;
171 int request_generation; /* for timestamping incoming requests */
174 bool bus_reset_packet_quirk;
175 bool iso_cycle_timer_quirk;
178 * Spinlock for accessing fw_ohci data. Never call out of
179 * this driver with this lock held.
183 struct ar_context ar_request_ctx;
184 struct ar_context ar_response_ctx;
185 struct context at_request_ctx;
186 struct context at_response_ctx;
189 struct iso_context *it_context_list;
190 u64 ir_context_channels;
192 struct iso_context *ir_context_list;
195 dma_addr_t config_rom_bus;
196 __be32 *next_config_rom;
197 dma_addr_t next_config_rom_bus;
201 dma_addr_t self_id_bus;
202 struct tasklet_struct bus_reset_tasklet;
204 u32 self_id_buffer[512];
207 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
209 return container_of(card, struct fw_ohci, card);
212 #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
213 #define IR_CONTEXT_BUFFER_FILL 0x80000000
214 #define IR_CONTEXT_ISOCH_HEADER 0x40000000
215 #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
216 #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
217 #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
219 #define CONTEXT_RUN 0x8000
220 #define CONTEXT_WAKE 0x1000
221 #define CONTEXT_DEAD 0x0800
222 #define CONTEXT_ACTIVE 0x0400
224 #define OHCI1394_MAX_AT_REQ_RETRIES 0xf
225 #define OHCI1394_MAX_AT_RESP_RETRIES 0x2
226 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
228 #define OHCI1394_REGISTER_SIZE 0x800
229 #define OHCI_LOOP_COUNT 500
230 #define OHCI1394_PCI_HCI_Control 0x40
231 #define SELF_ID_BUF_SIZE 0x800
232 #define OHCI_TCODE_PHY_PACKET 0x0e
233 #define OHCI_VERSION_1_1 0x010010
235 static char ohci_driver_name[] = KBUILD_MODNAME;
237 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
239 #define OHCI_PARAM_DEBUG_AT_AR 1
240 #define OHCI_PARAM_DEBUG_SELFIDS 2
241 #define OHCI_PARAM_DEBUG_IRQS 4
242 #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
244 static int param_debug;
245 module_param_named(debug, param_debug, int, 0644);
246 MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
247 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
248 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
249 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
250 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
251 ", or a combination, or all = -1)");
253 static void log_irqs(u32 evt)
255 if (likely(!(param_debug &
256 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
259 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
260 !(evt & OHCI1394_busReset))
263 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
264 evt & OHCI1394_selfIDComplete ? " selfID" : "",
265 evt & OHCI1394_RQPkt ? " AR_req" : "",
266 evt & OHCI1394_RSPkt ? " AR_resp" : "",
267 evt & OHCI1394_reqTxComplete ? " AT_req" : "",
268 evt & OHCI1394_respTxComplete ? " AT_resp" : "",
269 evt & OHCI1394_isochRx ? " IR" : "",
270 evt & OHCI1394_isochTx ? " IT" : "",
271 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
272 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
273 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
274 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
275 evt & OHCI1394_busReset ? " busReset" : "",
276 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
277 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
278 OHCI1394_respTxComplete | OHCI1394_isochRx |
279 OHCI1394_isochTx | OHCI1394_postedWriteErr |
280 OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
281 OHCI1394_regAccessFail | OHCI1394_busReset)
285 static const char *speed[] = {
286 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
288 static const char *power[] = {
289 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
290 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
292 static const char port[] = { '.', '-', 'p', 'c', };
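/* self-ID port status field: 00 not present, 01 not connected, 10 parent, 11 child */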
294 static char _p(u32 *s, int shift)
296 return port[*s >> shift & 3];
299 static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
301 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
304 fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
305 self_id_count, generation, node_id);
307 for (; self_id_count--; ++s)
308 if ((*s & 1 << 23) == 0)
309 fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
310 "%s gc=%d %s %s%s%s\n",
311 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
312 speed[*s >> 14 & 3], *s >> 16 & 63,
313 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
314 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
316 fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
318 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
319 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
322 static const char *evts[] = {
323 [0x00] = "evt_no_status", [0x01] = "-reserved-",
324 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
325 [0x04] = "evt_underrun", [0x05] = "evt_overrun",
326 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
327 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
328 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
329 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
330 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
331 [0x10] = "-reserved-", [0x11] = "ack_complete",
332 [0x12] = "ack_pending ", [0x13] = "-reserved-",
333 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
334 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
335 [0x18] = "-reserved-", [0x19] = "-reserved-",
336 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
337 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
338 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
339 [0x20] = "pending/cancelled",
341 static const char *tcodes[] = {
342 [0x0] = "QW req", [0x1] = "BW req",
343 [0x2] = "W resp", [0x3] = "-reserved-",
344 [0x4] = "QR req", [0x5] = "BR req",
345 [0x6] = "QR resp", [0x7] = "BR resp",
346 [0x8] = "cycle start", [0x9] = "Lk req",
347 [0xa] = "async stream packet", [0xb] = "Lk resp",
348 [0xc] = "-reserved-", [0xd] = "-reserved-",
349 [0xe] = "link internal", [0xf] = "-reserved-",
351 static const char *phys[] = {
352 [0x0] = "phy config packet", [0x1] = "link-on packet",
353 [0x2] = "self-id packet", [0x3] = "-reserved-",
356 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
358 int tcode = header[0] >> 4 & 0xf;
361 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
364 if (unlikely(evt >= ARRAY_SIZE(evts)))
367 if (evt == OHCI1394_evt_bus_reset) {
368 fw_notify("A%c evt_bus_reset, generation %d\n",
369 dir, (header[2] >> 16) & 0xff);
373 if (header[0] == ~header[1]) {
374 fw_notify("A%c %s, %s, %08x\n",
375 dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
380 case 0x0: case 0x6: case 0x8:
381 snprintf(specific, sizeof(specific), " = %08x",
382 be32_to_cpu((__force __be32)header[3]));
384 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
385 snprintf(specific, sizeof(specific), " %x,%x",
386 header[3] >> 16, header[3] & 0xffff);
394 fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
396 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
397 fw_notify("A%c spd %x tl %02x, "
400 dir, speed, header[0] >> 10 & 0x3f,
401 header[1] >> 16, header[0] >> 16, evts[evt],
402 tcodes[tcode], header[1] & 0xffff, header[2], specific);
405 fw_notify("A%c spd %x tl %02x, "
408 dir, speed, header[0] >> 10 & 0x3f,
409 header[1] >> 16, header[0] >> 16, evts[evt],
410 tcodes[tcode], specific);
416 #define log_irqs(evt)
417 #define log_selfids(node_id, generation, self_id_count, sid)
418 #define log_ar_at_event(dir, speed, header, evt)
420 #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
422 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
424 writel(data, ohci->registers + offset);
427 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
429 return readl(ohci->registers + offset);
432 static inline void flush_writes(const struct fw_ohci *ohci)
434 /* Do a dummy read to flush writes. */
435 reg_read(ohci, OHCI1394_Version);
438 static int ohci_update_phy_reg(struct fw_card *card, int addr,
439 int clear_bits, int set_bits)
441 struct fw_ohci *ohci = fw_ohci(card);
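	/*
	 * Read-modify-write of a PHY register:  trigger a read through
	 * PhyControl, wait for ReadDone, apply clear_bits/set_bits to the
	 * value read back, and write the result to the same address.
	 */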
444 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
447 val = reg_read(ohci, OHCI1394_PhyControl);
448 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
449 fw_error("failed to set phy reg bits.\n");
453 old = OHCI1394_PhyControl_ReadData(val);
454 old = (old & ~clear_bits) | set_bits;
455 reg_write(ohci, OHCI1394_PhyControl,
456 OHCI1394_PhyControl_Write(addr, old));
461 static int ar_context_add_page(struct ar_context *ctx)
463 struct device *dev = ctx->ohci->card.device;
464 struct ar_buffer *ab;
465 dma_addr_t uninitialized_var(ab_bus);
468 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
473 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
474 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
476 DESCRIPTOR_BRANCH_ALWAYS);
477 offset = offsetof(struct ar_buffer, data);
478 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
479 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
480 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
481 ab->descriptor.branch_address = 0;
483 ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
484 ctx->last_buffer->next = ab;
485 ctx->last_buffer = ab;
487 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
488 flush_writes(ctx->ohci);
493 static void ar_context_release(struct ar_context *ctx)
495 struct ar_buffer *ab, *ab_next;
499 for (ab = ctx->current_buffer; ab; ab = ab_next) {
501 offset = offsetof(struct ar_buffer, data);
502 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
503 dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
508 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
509 #define cond_le32_to_cpu(v) \
510 (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
512 #define cond_le32_to_cpu(v) le32_to_cpu(v)
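/*
 * The old_uninorth case covers old Apple UniNorth controllers on 32-bit
 * PowerMacs, whose received DMA data effectively already arrives in host
 * byte order, so the usual little-endian conversion has to be skipped.
 */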
515 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
517 struct fw_ohci *ohci = ctx->ohci;
519 u32 status, length, tcode;
522 p.header[0] = cond_le32_to_cpu(buffer[0]);
523 p.header[1] = cond_le32_to_cpu(buffer[1]);
524 p.header[2] = cond_le32_to_cpu(buffer[2]);
526 tcode = (p.header[0] >> 4) & 0x0f;
528 case TCODE_WRITE_QUADLET_REQUEST:
529 case TCODE_READ_QUADLET_RESPONSE:
530 p.header[3] = (__force __u32) buffer[3];
531 p.header_length = 16;
532 p.payload_length = 0;
535 case TCODE_READ_BLOCK_REQUEST:
536 p.header[3] = cond_le32_to_cpu(buffer[3]);
537 p.header_length = 16;
538 p.payload_length = 0;
541 case TCODE_WRITE_BLOCK_REQUEST:
542 case TCODE_READ_BLOCK_RESPONSE:
543 case TCODE_LOCK_REQUEST:
544 case TCODE_LOCK_RESPONSE:
545 p.header[3] = cond_le32_to_cpu(buffer[3]);
546 p.header_length = 16;
547 p.payload_length = p.header[3] >> 16;
550 case TCODE_WRITE_RESPONSE:
551 case TCODE_READ_QUADLET_REQUEST:
552 case OHCI_TCODE_PHY_PACKET:
553 p.header_length = 12;
554 p.payload_length = 0;
558 /* FIXME: Stop context, discard everything, and restart? */
560 p.payload_length = 0;
563 p.payload = (void *) buffer + p.header_length;
565 /* FIXME: What to do about evt_* errors? */
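	/*
	 * The controller appends a status quadlet after the packet data;
	 * 'length' is the packet size in quadlets, so buffer[length] is that
	 * trailer, holding the ack/event code, speed and timestamp unpacked
	 * below.
	 */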
566 length = (p.header_length + p.payload_length + 3) / 4;
567 status = cond_le32_to_cpu(buffer[length]);
568 evt = (status >> 16) & 0x1f;
571 p.speed = (status >> 21) & 0x7;
572 p.timestamp = status & 0xffff;
573 p.generation = ohci->request_generation;
575 log_ar_at_event('R', p.speed, p.header, evt);
578 * The OHCI bus reset handler synthesizes a phy packet with
579 * the new generation number when a bus reset happens (see
580 * section 8.4.2.3). This helps us determine when a request
581 * was received and make sure we send the response in the same
582 * generation. We only need this for requests; for responses
583 * we use the unique tlabel for finding the matching
586 * Alas some chips sometimes emit bus reset packets with a
587 * wrong generation. We set the correct generation for these
588 * at a slightly incorrect time (in bus_reset_tasklet).
590 if (evt == OHCI1394_evt_bus_reset) {
591 if (!ohci->bus_reset_packet_quirk)
592 ohci->request_generation = (p.header[2] >> 16) & 0xff;
593 } else if (ctx == &ohci->ar_request_ctx) {
594 fw_core_handle_request(&ohci->card, &p);
596 fw_core_handle_response(&ohci->card, &p);
599 return buffer + length + 1;
602 static void ar_context_tasklet(unsigned long data)
604 struct ar_context *ctx = (struct ar_context *)data;
605 struct fw_ohci *ohci = ctx->ohci;
606 struct ar_buffer *ab;
607 struct descriptor *d;
610 ab = ctx->current_buffer;
613 if (d->res_count == 0) {
614 size_t size, rest, offset;
615 dma_addr_t start_bus;
619 * This descriptor is finished and we may have a
620 * packet split across this and the next buffer. We
621 * reuse the page for reassembling the split packet.
624 offset = offsetof(struct ar_buffer, data);
626 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
630 size = buffer + PAGE_SIZE - ctx->pointer;
631 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
632 memmove(buffer, ctx->pointer, size);
633 memcpy(buffer + size, ab->data, rest);
634 ctx->current_buffer = ab;
635 ctx->pointer = (void *) ab->data + rest;
636 end = buffer + size + rest;
639 buffer = handle_ar_packet(ctx, buffer);
641 dma_free_coherent(ohci->card.device, PAGE_SIZE,
643 ar_context_add_page(ctx);
645 buffer = ctx->pointer;
647 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
650 buffer = handle_ar_packet(ctx, buffer);
654 static int ar_context_init(struct ar_context *ctx,
655 struct fw_ohci *ohci, u32 regs)
661 ctx->last_buffer = &ab;
662 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
664 ar_context_add_page(ctx);
665 ar_context_add_page(ctx);
666 ctx->current_buffer = ab.next;
667 ctx->pointer = ctx->current_buffer->data;
672 static void ar_context_run(struct ar_context *ctx)
674 struct ar_buffer *ab = ctx->current_buffer;
678 offset = offsetof(struct ar_buffer, data);
679 ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
681 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
682 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
683 flush_writes(ctx->ohci);
686 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
690 b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
691 key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
693 /* figure out which descriptor the branch address goes in */
694 if (z == 2 && (b == 3 || key == 2))
700 static void context_tasklet(unsigned long data)
702 struct context *ctx = (struct context *) data;
703 struct descriptor *d, *last;
706 struct descriptor_buffer *desc;
708 desc = list_entry(ctx->buffer_list.next,
709 struct descriptor_buffer, list);
711 while (last->branch_address != 0) {
712 struct descriptor_buffer *old_desc = desc;
713 address = le32_to_cpu(last->branch_address);
717 /* If the branch address points to a buffer outside of the
718 * current buffer, advance to the next buffer. */
719 if (address < desc->buffer_bus ||
720 address >= desc->buffer_bus + desc->used)
721 desc = list_entry(desc->list.next,
722 struct descriptor_buffer, list);
723 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
724 last = find_branch_descriptor(d, z);
726 if (!ctx->callback(ctx, d, last))
729 if (old_desc != desc) {
730 /* If we've advanced to the next buffer, move the
731 * previous buffer to the free list. */
734 spin_lock_irqsave(&ctx->ohci->lock, flags);
735 list_move_tail(&old_desc->list, &ctx->buffer_list);
736 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
743 * Allocate a new buffer and add it to the list of free buffers for this
744 * context. Must be called with ohci->lock held.
746 static int context_add_buffer(struct context *ctx)
748 struct descriptor_buffer *desc;
749 dma_addr_t uninitialized_var(bus_addr);
753 * 16MB of descriptors should be far more than enough for any DMA
754 * program. This will catch run-away userspace or DoS attacks.
756 if (ctx->total_allocation >= 16*1024*1024)
759 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
760 &bus_addr, GFP_ATOMIC);
764 offset = (void *)&desc->buffer - (void *)desc;
765 desc->buffer_size = PAGE_SIZE - offset;
766 desc->buffer_bus = bus_addr + offset;
769 list_add_tail(&desc->list, &ctx->buffer_list);
770 ctx->total_allocation += PAGE_SIZE;
775 static int context_init(struct context *ctx, struct fw_ohci *ohci,
776 u32 regs, descriptor_callback_t callback)
780 ctx->total_allocation = 0;
782 INIT_LIST_HEAD(&ctx->buffer_list);
783 if (context_add_buffer(ctx) < 0)
786 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
787 struct descriptor_buffer, list);
789 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
790 ctx->callback = callback;
793 * We put a dummy descriptor in the buffer that has a NULL
794 * branch address and looks like it's been sent. That way we
795 * have a descriptor to append DMA programs to.
797 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
798 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
799 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
800 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
801 ctx->last = ctx->buffer_tail->buffer;
802 ctx->prev = ctx->buffer_tail->buffer;
807 static void context_release(struct context *ctx)
809 struct fw_card *card = &ctx->ohci->card;
810 struct descriptor_buffer *desc, *tmp;
812 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
813 dma_free_coherent(card->device, PAGE_SIZE, desc,
815 ((void *)&desc->buffer - (void *)desc));
818 /* Must be called with ohci->lock held */
819 static struct descriptor *context_get_descriptors(struct context *ctx,
820 int z, dma_addr_t *d_bus)
822 struct descriptor *d = NULL;
823 struct descriptor_buffer *desc = ctx->buffer_tail;
825 if (z * sizeof(*d) > desc->buffer_size)
828 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
829 /* No room for the descriptor in this buffer, so advance to the
832 if (desc->list.next == &ctx->buffer_list) {
833 /* If there is no free buffer next in the list,
835 if (context_add_buffer(ctx) < 0)
838 desc = list_entry(desc->list.next,
839 struct descriptor_buffer, list);
840 ctx->buffer_tail = desc;
843 d = desc->buffer + desc->used / sizeof(*d);
844 memset(d, 0, z * sizeof(*d));
845 *d_bus = desc->buffer_bus + desc->used;
850 static void context_run(struct context *ctx, u32 extra)
852 struct fw_ohci *ohci = ctx->ohci;
854 reg_write(ohci, COMMAND_PTR(ctx->regs),
855 le32_to_cpu(ctx->last->branch_address));
856 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
857 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
861 static void context_append(struct context *ctx,
862 struct descriptor *d, int z, int extra)
865 struct descriptor_buffer *desc = ctx->buffer_tail;
867 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
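	/*
	 * Link the new block into the running DMA program:  the previous
	 * descriptor's branch_address gets the bus address of this block with
	 * the Z count in its low bits, and the context is then woken up.  The
	 * block itself was filled in by the caller before this point, so the
	 * controller never follows a branch into half-initialized descriptors.
	 */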
869 desc->used += (z + extra) * sizeof(*d);
870 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
871 ctx->prev = find_branch_descriptor(d, z);
873 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
874 flush_writes(ctx->ohci);
877 static void context_stop(struct context *ctx)
882 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
883 flush_writes(ctx->ohci);
885 for (i = 0; i < 10; i++) {
886 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
887 if ((reg & CONTEXT_ACTIVE) == 0)
892 fw_error("Error: DMA context still active (0x%08x)\n", reg);
896 struct fw_packet *packet;
900 * This function appends a packet to the DMA queue for transmission.
901 * Must always be called with the ohci->lock held to ensure proper
902 * generation handling and locking around packet queue manipulation.
904 static int at_context_queue_packet(struct context *ctx,
905 struct fw_packet *packet)
907 struct fw_ohci *ohci = ctx->ohci;
908 dma_addr_t d_bus, uninitialized_var(payload_bus);
909 struct driver_data *driver_data;
910 struct descriptor *d, *last;
915 d = context_get_descriptors(ctx, 4, &d_bus);
917 packet->ack = RCODE_SEND_ERROR;
921 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
922 d[0].res_count = cpu_to_le16(packet->timestamp);
925 * The DMA format for asynchronous link packets is different
926 * from the IEEE1394 layout, so shift the fields around
927 * accordingly. If header_length is 8, it's a PHY packet, to
928 * which we need to prepend an extra quadlet.
931 header = (__le32 *) &d[1];
932 switch (packet->header_length) {
935 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
936 (packet->speed << 16));
937 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
938 (packet->header[0] & 0xffff0000));
939 header[2] = cpu_to_le32(packet->header[2]);
941 tcode = (packet->header[0] >> 4) & 0x0f;
942 if (TCODE_IS_BLOCK_PACKET(tcode))
943 header[3] = cpu_to_le32(packet->header[3]);
945 header[3] = (__force __le32) packet->header[3];
947 d[0].req_count = cpu_to_le16(packet->header_length);
951 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
952 (packet->speed << 16));
953 header[1] = cpu_to_le32(packet->header[0]);
954 header[2] = cpu_to_le32(packet->header[1]);
955 d[0].req_count = cpu_to_le16(12);
959 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
960 (packet->speed << 16));
961 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
962 d[0].req_count = cpu_to_le16(8);
967 packet->ack = RCODE_SEND_ERROR;
971 driver_data = (struct driver_data *) &d[3];
972 driver_data->packet = packet;
973 packet->driver_data = driver_data;
975 if (packet->payload_length > 0) {
977 dma_map_single(ohci->card.device, packet->payload,
978 packet->payload_length, DMA_TO_DEVICE);
979 if (dma_mapping_error(ohci->card.device, payload_bus)) {
980 packet->ack = RCODE_SEND_ERROR;
983 packet->payload_bus = payload_bus;
984 packet->payload_mapped = true;
986 d[2].req_count = cpu_to_le16(packet->payload_length);
987 d[2].data_address = cpu_to_le32(payload_bus);
995 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
996 DESCRIPTOR_IRQ_ALWAYS |
997 DESCRIPTOR_BRANCH_ALWAYS);
1000 * If the controller and packet generations don't match, we need to
1001 * bail out and try again. If IntEvent.busReset is set, the AT context
1002 * is halted, so appending to the context and trying to run it is
1003 * futile. Most controllers do the right thing and just flush the AT
1004 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
1005 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
1006 * up stalling out. So we just bail out in software and try again
1007 * later, and everyone is happy.
1008 * FIXME: Document how the locking works.
1010 if (ohci->generation != packet->generation ||
1011 reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
1012 if (packet->payload_mapped)
1013 dma_unmap_single(ohci->card.device, payload_bus,
1014 packet->payload_length, DMA_TO_DEVICE);
1015 packet->ack = RCODE_GENERATION;
1019 context_append(ctx, d, z, 4 - z);
1021 /* If the context isn't already running, start it up. */
1022 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
1023 if ((reg & CONTEXT_RUN) == 0)
1024 context_run(ctx, 0);
1029 static int handle_at_packet(struct context *context,
1030 struct descriptor *d,
1031 struct descriptor *last)
1033 struct driver_data *driver_data;
1034 struct fw_packet *packet;
1035 struct fw_ohci *ohci = context->ohci;
1038 if (last->transfer_status == 0)
1039 /* This descriptor isn't done yet, stop iteration. */
1042 driver_data = (struct driver_data *) &d[3];
1043 packet = driver_data->packet;
1045 /* This packet was cancelled, just continue. */
1048 if (packet->payload_mapped)
1049 dma_unmap_single(ohci->card.device, packet->payload_bus,
1050 packet->payload_length, DMA_TO_DEVICE);
1052 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1053 packet->timestamp = le16_to_cpu(last->res_count);
1055 log_ar_at_event('T', packet->speed, packet->header, evt);
1058 case OHCI1394_evt_timeout:
1059 /* Async response transmit timed out. */
1060 packet->ack = RCODE_CANCELLED;
1063 case OHCI1394_evt_flushed:
1065 * A flushed packet should give the same error as when we
1066 * try to use a stale generation count.
1068 packet->ack = RCODE_GENERATION;
1071 case OHCI1394_evt_missing_ack:
1073 * Using a valid (current) generation count, but the
1074 * node is not on the bus or not sending acks.
1076 packet->ack = RCODE_NO_ACK;
1079 case ACK_COMPLETE + 0x10:
1080 case ACK_PENDING + 0x10:
1081 case ACK_BUSY_X + 0x10:
1082 case ACK_BUSY_A + 0x10:
1083 case ACK_BUSY_B + 0x10:
1084 case ACK_DATA_ERROR + 0x10:
1085 case ACK_TYPE_ERROR + 0x10:
1086 packet->ack = evt - 0x10;
1090 packet->ack = RCODE_SEND_ERROR;
1094 packet->callback(packet, &ohci->card, packet->ack);
1099 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
1100 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
1101 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
1102 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1103 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1105 static void handle_local_rom(struct fw_ohci *ohci,
1106 struct fw_packet *packet, u32 csr)
1108 struct fw_packet response;
1109 int tcode, length, i;
1111 tcode = HEADER_GET_TCODE(packet->header[0]);
1112 if (TCODE_IS_BLOCK_PACKET(tcode))
1113 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1117 i = csr - CSR_CONFIG_ROM;
1118 if (i + length > CONFIG_ROM_SIZE) {
1119 fw_fill_response(&response, packet->header,
1120 RCODE_ADDRESS_ERROR, NULL, 0);
1121 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1122 fw_fill_response(&response, packet->header,
1123 RCODE_TYPE_ERROR, NULL, 0);
1125 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1126 (void *) ohci->config_rom + i, length);
1129 fw_core_handle_response(&ohci->card, &response);
1132 static void handle_local_lock(struct fw_ohci *ohci,
1133 struct fw_packet *packet, u32 csr)
1135 struct fw_packet response;
1136 int tcode, length, ext_tcode, sel;
1137 __be32 *payload, lock_old;
1138 u32 lock_arg, lock_data;
1140 tcode = HEADER_GET_TCODE(packet->header[0]);
1141 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1142 payload = packet->payload;
1143 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1145 if (tcode == TCODE_LOCK_REQUEST &&
1146 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1147 lock_arg = be32_to_cpu(payload[0]);
1148 lock_data = be32_to_cpu(payload[1]);
1149 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1153 fw_fill_response(&response, packet->header,
1154 RCODE_TYPE_ERROR, NULL, 0);
1158 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1159 reg_write(ohci, OHCI1394_CSRData, lock_data);
1160 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1161 reg_write(ohci, OHCI1394_CSRControl, sel);
1163 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
1164 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
1166 fw_notify("swap not done yet\n");
1168 fw_fill_response(&response, packet->header,
1169 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
1171 fw_core_handle_response(&ohci->card, &response);
1174 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1179 if (ctx == &ctx->ohci->at_request_ctx) {
1180 packet->ack = ACK_PENDING;
1181 packet->callback(packet, &ctx->ohci->card, packet->ack);
1185 ((unsigned long long)
1186 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1188 csr = offset - CSR_REGISTER_BASE;
1190 /* Handle config rom reads. */
1191 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1192 handle_local_rom(ctx->ohci, packet, csr);
1194 case CSR_BUS_MANAGER_ID:
1195 case CSR_BANDWIDTH_AVAILABLE:
1196 case CSR_CHANNELS_AVAILABLE_HI:
1197 case CSR_CHANNELS_AVAILABLE_LO:
1198 handle_local_lock(ctx->ohci, packet, csr);
1201 if (ctx == &ctx->ohci->at_request_ctx)
1202 fw_core_handle_request(&ctx->ohci->card, packet);
1204 fw_core_handle_response(&ctx->ohci->card, packet);
1208 if (ctx == &ctx->ohci->at_response_ctx) {
1209 packet->ack = ACK_COMPLETE;
1210 packet->callback(packet, &ctx->ohci->card, packet->ack);
1214 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1216 unsigned long flags;
1219 spin_lock_irqsave(&ctx->ohci->lock, flags);
1221 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1222 ctx->ohci->generation == packet->generation) {
1223 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1224 handle_local_request(ctx, packet);
1228 ret = at_context_queue_packet(ctx, packet);
1229 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1232 packet->callback(packet, &ctx->ohci->card, packet->ack);
1236 static void bus_reset_tasklet(unsigned long data)
1238 struct fw_ohci *ohci = (struct fw_ohci *)data;
1239 int self_id_count, i, j, reg;
1240 int generation, new_generation;
1241 unsigned long flags;
1242 void *free_rom = NULL;
1243 dma_addr_t free_rom_bus = 0;
1245 reg = reg_read(ohci, OHCI1394_NodeID);
1246 if (!(reg & OHCI1394_NodeID_idValid)) {
1247 fw_notify("node ID not valid, new bus reset in progress\n");
1250 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1251 fw_notify("malconfigured bus\n");
1254 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1255 OHCI1394_NodeID_nodeNumber);
1257 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1258 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1259 fw_notify("inconsistent self IDs\n");
1263 * The count in the SelfIDCount register is the number of
1264 * bytes in the self ID receive buffer. Since we also receive
1265 * the inverted quadlets and a header quadlet, we shift one
1266 * bit extra to get the actual number of self IDs.
1268 self_id_count = (reg >> 3) & 0xff;
1269 if (self_id_count == 0 || self_id_count > 252) {
1270 fw_notify("inconsistent self IDs\n");
1273 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
1276 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1277 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1278 fw_notify("inconsistent self IDs\n");
1281 ohci->self_id_buffer[j] =
1282 cond_le32_to_cpu(ohci->self_id_cpu[i]);
1287 * Check the consistency of the self IDs we just read. The
1288 * problem we face is that a new bus reset can start while we
1289 * read out the self IDs from the DMA buffer. If this happens,
1290 * the DMA buffer will be overwritten with new self IDs and we
1291 * will read out inconsistent data. The OHCI specification
1292 * (section 11.2) recommends a technique similar to
1293 * linux/seqlock.h, where we remember the generation of the
1294 * self IDs in the buffer before reading them out and compare
1295 * it to the current generation after reading them out. If
1296 * the two generations match we know we have a consistent set
1300 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1301 if (new_generation != generation) {
1302 fw_notify("recursive bus reset detected, "
1303 "discarding self ids\n");
1307 /* FIXME: Document how the locking works. */
1308 spin_lock_irqsave(&ohci->lock, flags);
1310 ohci->generation = generation;
1311 context_stop(&ohci->at_request_ctx);
1312 context_stop(&ohci->at_response_ctx);
1313 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1315 if (ohci->bus_reset_packet_quirk)
1316 ohci->request_generation = generation;
1319 * This next bit is unrelated to the AT context stuff but we
1320 * have to do it under the spinlock also. If a new config rom
1321 * was set up before this reset, the old one is now no longer
1322 * in use and we can free it. Update the config rom pointers
1323 * to point to the current config rom and clear the
1324 * next_config_rom pointer so a new update can take place.
1327 if (ohci->next_config_rom != NULL) {
1328 if (ohci->next_config_rom != ohci->config_rom) {
1329 free_rom = ohci->config_rom;
1330 free_rom_bus = ohci->config_rom_bus;
1332 ohci->config_rom = ohci->next_config_rom;
1333 ohci->config_rom_bus = ohci->next_config_rom_bus;
1334 ohci->next_config_rom = NULL;
1337 * Restore config_rom image and manually update
1338 * config_rom registers. Writing the header quadlet
1339 * will indicate that the config rom is ready, so we
1342 reg_write(ohci, OHCI1394_BusOptions,
1343 be32_to_cpu(ohci->config_rom[2]));
1344 ohci->config_rom[0] = ohci->next_header;
1345 reg_write(ohci, OHCI1394_ConfigROMhdr,
1346 be32_to_cpu(ohci->next_header));
1349 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1350 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1351 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1354 spin_unlock_irqrestore(&ohci->lock, flags);
1357 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1358 free_rom, free_rom_bus);
1360 log_selfids(ohci->node_id, generation,
1361 self_id_count, ohci->self_id_buffer);
1363 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
1364 self_id_count, ohci->self_id_buffer);
1367 static irqreturn_t irq_handler(int irq, void *data)
1369 struct fw_ohci *ohci = data;
1370 u32 event, iso_event;
1373 event = reg_read(ohci, OHCI1394_IntEventClear);
1375 if (!event || !~event)
1378 /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
1379 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
1382 if (event & OHCI1394_selfIDComplete)
1383 tasklet_schedule(&ohci->bus_reset_tasklet);
1385 if (event & OHCI1394_RQPkt)
1386 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1388 if (event & OHCI1394_RSPkt)
1389 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1391 if (event & OHCI1394_reqTxComplete)
1392 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1394 if (event & OHCI1394_respTxComplete)
1395 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1397 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1398 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1401 i = ffs(iso_event) - 1;
1402 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1403 iso_event &= ~(1 << i);
1406 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1407 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1410 i = ffs(iso_event) - 1;
1411 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1412 iso_event &= ~(1 << i);
1415 if (unlikely(event & OHCI1394_regAccessFail))
1416 fw_error("Register access failure - "
1417 "please notify linux1394-devel@lists.sf.net\n");
1419 if (unlikely(event & OHCI1394_postedWriteErr))
1420 fw_error("PCI posted write error\n");
1422 if (unlikely(event & OHCI1394_cycleTooLong)) {
1423 if (printk_ratelimit())
1424 fw_notify("isochronous cycle too long\n");
1425 reg_write(ohci, OHCI1394_LinkControlSet,
1426 OHCI1394_LinkControl_cycleMaster);
1429 if (unlikely(event & OHCI1394_cycleInconsistent)) {
1431 * We need to clear this event bit in order to make
1432 * cycleMatch isochronous I/O work. In theory we should
1433 * stop active cycleMatch iso contexts now and restart
1434 * them at least two cycles later. (FIXME?)
1436 if (printk_ratelimit())
1437 fw_notify("isochronous cycle inconsistent\n");
1443 static int software_reset(struct fw_ohci *ohci)
1447 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1449 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1450 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1451 OHCI1394_HCControl_softReset) == 0)
1459 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1461 size_t size = length * 4;
1463 memcpy(dest, src, size);
1464 if (size < CONFIG_ROM_SIZE)
1465 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1468 static int ohci_enable(struct fw_card *card,
1469 const __be32 *config_rom, size_t length)
1471 struct fw_ohci *ohci = fw_ohci(card);
1472 struct pci_dev *dev = to_pci_dev(card->device);
1476 if (software_reset(ohci)) {
1477 fw_error("Failed to reset ohci card.\n");
1482 * Now enable LPS, which we need in order to start accessing
1483 * most of the registers. In fact, on some cards (ALI M5251),
1484 * accessing registers in the SClk domain without LPS enabled
1485 * will lock up the machine. Wait 50 msec to make sure the link
1486 * is fully enabled. However, with some cards (well, at least
1487 * a JMicron PCIe card), we have to try again sometimes.
1489 reg_write(ohci, OHCI1394_HCControlSet,
1490 OHCI1394_HCControl_LPS |
1491 OHCI1394_HCControl_postedWriteEnable);
1494 for (lps = 0, i = 0; !lps && i < 3; i++) {
1496 lps = reg_read(ohci, OHCI1394_HCControlSet) &
1497 OHCI1394_HCControl_LPS;
1501 fw_error("Failed to set Link Power Status\n");
1505 reg_write(ohci, OHCI1394_HCControlClear,
1506 OHCI1394_HCControl_noByteSwapData);
1508 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1509 reg_write(ohci, OHCI1394_LinkControlClear,
1510 OHCI1394_LinkControl_rcvPhyPkt);
1511 reg_write(ohci, OHCI1394_LinkControlSet,
1512 OHCI1394_LinkControl_rcvSelfID |
1513 OHCI1394_LinkControl_cycleTimerEnable |
1514 OHCI1394_LinkControl_cycleMaster);
1516 reg_write(ohci, OHCI1394_ATRetries,
1517 OHCI1394_MAX_AT_REQ_RETRIES |
1518 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1519 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1521 ar_context_run(&ohci->ar_request_ctx);
1522 ar_context_run(&ohci->ar_response_ctx);
1524 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1525 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1526 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1527 reg_write(ohci, OHCI1394_IntMaskSet,
1528 OHCI1394_selfIDComplete |
1529 OHCI1394_RQPkt | OHCI1394_RSPkt |
1530 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1531 OHCI1394_isochRx | OHCI1394_isochTx |
1532 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1533 OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
1534 OHCI1394_masterIntEnable);
1535 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1536 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1538 /* Activate link_on bit and contender bit in our self ID packets. */
1539 if (ohci_update_phy_reg(card, 4, 0,
1540 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
1544 * When the link is not yet enabled, the atomic config rom
1545 * update mechanism described below in ohci_set_config_rom()
1546 * is not active. We have to update ConfigRomHeader and
1547 * BusOptions manually, and the write to ConfigROMmap takes
1548 * effect immediately. We tie this to the enabling of the
1549 * link, so we have a valid config rom before enabling - the
1550 * OHCI requires that ConfigROMhdr and BusOptions have valid
1551 * values before enabling.
1553 * However, when the ConfigROMmap is written, some controllers
1554 * always read back quadlets 0 and 2 from the config rom to
1555 * the ConfigRomHeader and BusOptions registers on bus reset.
1556 * They shouldn't do that in this initial case where the link
1557 * isn't enabled. This means we have to use the same
1558 * workaround here, setting the bus header to 0 and then writing
1559 * the right values in the bus reset tasklet.
1563 ohci->next_config_rom =
1564 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1565 &ohci->next_config_rom_bus,
1567 if (ohci->next_config_rom == NULL)
1570 copy_config_rom(ohci->next_config_rom, config_rom, length);
1573 * In the suspend case, config_rom is NULL, which
1574 * means that we just reuse the old config rom.
1576 ohci->next_config_rom = ohci->config_rom;
1577 ohci->next_config_rom_bus = ohci->config_rom_bus;
1580 ohci->next_header = ohci->next_config_rom[0];
1581 ohci->next_config_rom[0] = 0;
1582 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1583 reg_write(ohci, OHCI1394_BusOptions,
1584 be32_to_cpu(ohci->next_config_rom[2]));
1585 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1587 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1589 if (request_irq(dev->irq, irq_handler,
1590 IRQF_SHARED, ohci_driver_name, ohci)) {
1591 fw_error("Failed to allocate shared interrupt %d.\n",
1593 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1594 ohci->config_rom, ohci->config_rom_bus);
1598 reg_write(ohci, OHCI1394_HCControlSet,
1599 OHCI1394_HCControl_linkEnable |
1600 OHCI1394_HCControl_BIBimageValid);
1604 * We are ready to go, initiate bus reset to finish the
1608 fw_core_initiate_bus_reset(&ohci->card, 1);
1613 static int ohci_set_config_rom(struct fw_card *card,
1614 const __be32 *config_rom, size_t length)
1616 struct fw_ohci *ohci;
1617 unsigned long flags;
1619 __be32 *next_config_rom;
1620 dma_addr_t uninitialized_var(next_config_rom_bus);
1622 ohci = fw_ohci(card);
1625 * When the OHCI controller is enabled, the config rom update
1626 * mechanism is a bit tricky, but easy enough to use. See
1627 * section 5.5.6 in the OHCI specification.
1629 * The OHCI controller caches the new config rom address in a
1630 * shadow register (ConfigROMmapNext) and needs a bus reset
1631 * for the changes to take place. When the bus reset is
1632 * detected, the controller loads the new values for the
1633 * ConfigRomHeader and BusOptions registers from the specified
1634 * config rom and loads ConfigROMmap from the ConfigROMmapNext
1635 * shadow register. All automatically and atomically.
1637 * Now, there's a twist to this story. The automatic load of
1638 * ConfigRomHeader and BusOptions doesn't honor the
1639 * noByteSwapData bit, so with a be32 config rom, the
1640 * controller will load be32 values into these registers
1641 * during the atomic update, even on little endian
1642 * architectures. The workaround we use is to put a 0 in the
1643 * header quadlet; 0 is endian agnostic and means that the
1644 * config rom isn't ready yet. In the bus reset tasklet we
1645 * then set up the real values for the two registers.
1647 * We use ohci->lock to avoid racing with the code that sets
1648 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1652 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1653 &next_config_rom_bus, GFP_KERNEL);
1654 if (next_config_rom == NULL)
1657 spin_lock_irqsave(&ohci->lock, flags);
1659 if (ohci->next_config_rom == NULL) {
1660 ohci->next_config_rom = next_config_rom;
1661 ohci->next_config_rom_bus = next_config_rom_bus;
1663 copy_config_rom(ohci->next_config_rom, config_rom, length);
1665 ohci->next_header = config_rom[0];
1666 ohci->next_config_rom[0] = 0;
1668 reg_write(ohci, OHCI1394_ConfigROMmap,
1669 ohci->next_config_rom_bus);
1673 spin_unlock_irqrestore(&ohci->lock, flags);
1676 * Now initiate a bus reset to have the changes take
1677 * effect. We clean up the old config rom memory and DMA
1678 * mappings in the bus reset tasklet, since the OHCI
1679 * controller could need to access it before the bus reset
1683 fw_core_initiate_bus_reset(&ohci->card, 1);
1685 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1686 next_config_rom, next_config_rom_bus);
1691 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1693 struct fw_ohci *ohci = fw_ohci(card);
1695 at_context_transmit(&ohci->at_request_ctx, packet);
1698 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1700 struct fw_ohci *ohci = fw_ohci(card);
1702 at_context_transmit(&ohci->at_response_ctx, packet);
1705 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1707 struct fw_ohci *ohci = fw_ohci(card);
1708 struct context *ctx = &ohci->at_request_ctx;
1709 struct driver_data *driver_data = packet->driver_data;
1712 tasklet_disable(&ctx->tasklet);
1714 if (packet->ack != 0)
1717 if (packet->payload_mapped)
1718 dma_unmap_single(ohci->card.device, packet->payload_bus,
1719 packet->payload_length, DMA_TO_DEVICE);
1721 log_ar_at_event('T', packet->speed, packet->header, 0x20);
1722 driver_data->packet = NULL;
1723 packet->ack = RCODE_CANCELLED;
1724 packet->callback(packet, &ohci->card, packet->ack);
1727 tasklet_enable(&ctx->tasklet);
1732 static int ohci_enable_phys_dma(struct fw_card *card,
1733 int node_id, int generation)
1735 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1738 struct fw_ohci *ohci = fw_ohci(card);
1739 unsigned long flags;
1743 * FIXME: Make sure this bitmask is cleared when we clear the busReset
1744 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
1747 spin_lock_irqsave(&ohci->lock, flags);
1749 if (ohci->generation != generation) {
1755 * Note, if the node ID contains a non-local bus ID, physical DMA is
1756 * enabled for _all_ nodes on remote buses.
1759 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1761 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1763 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1767 spin_unlock_irqrestore(&ohci->lock, flags);
1770 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1773 static u32 cycle_timer_ticks(u32 cycle_timer)
1777 ticks = cycle_timer & 0xfff;
1778 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1779 ticks += (3072 * 8000) * (cycle_timer >> 25);
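	/*
	 * cycleOffset (bits 11-0) counts 24.576 MHz ticks, cycleCount
	 * (bits 24-12) advances every 3072 ticks (125 us), and cycleSeconds
	 * (bits 31-25) every 8000 cycles (1 s).
	 */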
1785 * Some controllers exhibit one or more of the following bugs when updating the
1786 * iso cycle timer register:
1787 * - When the lowest six bits are wrapping around to zero, a read that happens
1788 * at the same time will return garbage in the lowest ten bits.
1789 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1790 * not incremented for about 60 ns.
1791 * - Occasionally, the entire register reads zero.
1793 * To catch these, we read the register three times and ensure that the
1794 * difference between each two consecutive reads is approximately the same, i.e.
1795 * less than twice the other. Furthermore, any negative difference indicates an
1796 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1797 * execute, so we have enough precision to compute the ratio of the differences.)
1799 static u32 ohci_get_cycle_time(struct fw_card *card)
1801 struct fw_ohci *ohci = fw_ohci(card);
1807 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1809 if (ohci->iso_cycle_timer_quirk) {
1812 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1816 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1817 t0 = cycle_timer_ticks(c0);
1818 t1 = cycle_timer_ticks(c1);
1819 t2 = cycle_timer_ticks(c2);
1822 } while ((diff01 <= 0 || diff12 <= 0 ||
1823 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1830 static void copy_iso_headers(struct iso_context *ctx, void *p)
1832 int i = ctx->header_length;
1834 if (i + ctx->base.header_size > PAGE_SIZE)
1838 * The iso header is byteswapped to little endian by
1839 * the controller, but the remaining header quadlets
1840 * are big endian. We want to present all the headers
1841 * as big endian, so we have to swap the first quadlet.
1843 if (ctx->base.header_size > 0)
1844 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1845 if (ctx->base.header_size > 4)
1846 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
1847 if (ctx->base.header_size > 8)
1848 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
1849 ctx->header_length += ctx->base.header_size;
1852 static int handle_ir_packet_per_buffer(struct context *context,
1853 struct descriptor *d,
1854 struct descriptor *last)
1856 struct iso_context *ctx =
1857 container_of(context, struct iso_context, context);
1858 struct descriptor *pd;
1862 for (pd = d; pd <= last; pd++) {
1863 if (pd->transfer_status)
1867 /* Descriptor(s) not done yet, stop iteration */
1871 copy_iso_headers(ctx, p);
1873 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1874 ir_header = (__le32 *) p;
1875 ctx->base.callback(&ctx->base,
1876 le32_to_cpu(ir_header[0]) & 0xffff,
1877 ctx->header_length, ctx->header,
1878 ctx->base.callback_data);
1879 ctx->header_length = 0;
1885 static int handle_it_packet(struct context *context,
1886 struct descriptor *d,
1887 struct descriptor *last)
1889 struct iso_context *ctx =
1890 container_of(context, struct iso_context, context);
1892 struct descriptor *pd;
1894 for (pd = d; pd <= last; pd++)
1895 if (pd->transfer_status)
1898 /* Descriptor(s) not done yet, stop iteration */
1901 i = ctx->header_length;
1902 if (i + 4 < PAGE_SIZE) {
1903 /* Present this value as big-endian to match the receive code */
1904 *(__be32 *)(ctx->header + i) = cpu_to_be32(
1905 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
1906 le16_to_cpu(pd->res_count));
1907 ctx->header_length += 4;
1909 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1910 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1911 ctx->header_length, ctx->header,
1912 ctx->base.callback_data);
1913 ctx->header_length = 0;
1918 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1919 int type, int channel, size_t header_size)
1921 struct fw_ohci *ohci = fw_ohci(card);
1922 struct iso_context *ctx, *list;
1923 descriptor_callback_t callback;
1924 u64 *channels, dont_care = ~0ULL;
1926 unsigned long flags;
1927 int index, ret = -ENOMEM;
1929 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1930 channels = &dont_care;
1931 mask = &ohci->it_context_mask;
1932 list = ohci->it_context_list;
1933 callback = handle_it_packet;
1935 channels = &ohci->ir_context_channels;
1936 mask = &ohci->ir_context_mask;
1937 list = ohci->ir_context_list;
1938 callback = handle_ir_packet_per_buffer;
1941 spin_lock_irqsave(&ohci->lock, flags);
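	/*
	 * For receive contexts the requested channel must still be free in
	 * *channels; transmit contexts ignore it (dont_care).  Pick the
	 * lowest-numbered free context from *mask, otherwise fail with
	 * -EBUSY.
	 */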
1942 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
1944 *channels &= ~(1ULL << channel);
1945 *mask &= ~(1 << index);
1947 spin_unlock_irqrestore(&ohci->lock, flags);
1950 return ERR_PTR(-EBUSY);
1952 if (type == FW_ISO_CONTEXT_TRANSMIT)
1953 regs = OHCI1394_IsoXmitContextBase(index);
1955 regs = OHCI1394_IsoRcvContextBase(index);
1958 memset(ctx, 0, sizeof(*ctx));
1959 ctx->header_length = 0;
1960 ctx->header = (void *) __get_free_page(GFP_KERNEL);
1961 if (ctx->header == NULL)
1964 ret = context_init(&ctx->context, ohci, regs, callback);
1966 goto out_with_header;
1971 free_page((unsigned long)ctx->header);
1973 spin_lock_irqsave(&ohci->lock, flags);
1974 *mask |= 1 << index;
1975 spin_unlock_irqrestore(&ohci->lock, flags);
1977 return ERR_PTR(ret);
1980 static int ohci_start_iso(struct fw_iso_context *base,
1981 s32 cycle, u32 sync, u32 tags)
1983 struct iso_context *ctx = container_of(base, struct iso_context, base);
1984 struct fw_ohci *ohci = ctx->context.ohci;
1988 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1989 index = ctx - ohci->it_context_list;
1992 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
1993 (cycle & 0x7fff) << 16;
1995 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
1996 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
1997 context_run(&ctx->context, match);
1999 index = ctx - ohci->ir_context_list;
2000 control = IR_CONTEXT_ISOCH_HEADER;
2001 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2003 match |= (cycle & 0x07fff) << 12;
2004 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
2007 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
2008 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
2009 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
2010 context_run(&ctx->context, control);
2016 static int ohci_stop_iso(struct fw_iso_context *base)
2018 struct fw_ohci *ohci = fw_ohci(base->card);
2019 struct iso_context *ctx = container_of(base, struct iso_context, base);
2022 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
2023 index = ctx - ohci->it_context_list;
2024 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
2026 index = ctx - ohci->ir_context_list;
2027 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
2030 context_stop(&ctx->context);
2035 static void ohci_free_iso_context(struct fw_iso_context *base)
2037 struct fw_ohci *ohci = fw_ohci(base->card);
2038 struct iso_context *ctx = container_of(base, struct iso_context, base);
2039 unsigned long flags;
2042 ohci_stop_iso(base);
2043 context_release(&ctx->context);
2044 free_page((unsigned long)ctx->header);
2046 spin_lock_irqsave(&ohci->lock, flags);
2048 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
2049 index = ctx - ohci->it_context_list;
2050 ohci->it_context_mask |= 1 << index;
2052 index = ctx - ohci->ir_context_list;
2053 ohci->ir_context_mask |= 1 << index;
2054 ohci->ir_context_channels |= 1ULL << base->channel;
2057 spin_unlock_irqrestore(&ohci->lock, flags);
static int ohci_queue_iso_transmit(struct fw_iso_context *base,
				   struct fw_iso_packet *packet,
				   struct fw_iso_buffer *buffer,
				   unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME: Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page            = payload_index >> PAGE_SHIFT;
		offset          = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length          =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

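/*
 * Queue receive buffers in packet-per-buffer mode: for each expected packet,
 * an INPUT_MORE descriptor lands the isochronous header (and trailer) in
 * space reserved behind the descriptor block, followed by INPUT_MORE/
 * INPUT_LAST descriptors that deliver the payload into the caller's buffer.
 */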
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
					struct fw_iso_packet *packet,
					struct fw_iso_buffer *buffer,
					unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *pd;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size  = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
					      DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count    = cpu_to_le16(header_size);
		d->res_count    = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

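/*
 * Common queue entry point: dispatch to the transmit or receive
 * implementation while holding ohci->lock, so descriptor allocation and
 * appending stay serialized.
 */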
static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else
		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
							       buffer, payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

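/* Hooks through which the FireWire core drives this controller. */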
static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_cycle_time		= ohci_get_cycle_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

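/*
 * On PowerMacs the FireWire cell can be powered down; switch cable power and
 * the 1394 cell on before touching the controller and off again when the
 * device is released or suspended.
 */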
#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */

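/*
 * Probe: map the MMIO registers, detect per-vendor quirks, set up the
 * asynchronous and isochronous DMA contexts, allocate the self-ID buffer
 * and register the card with the FireWire core.
 */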
static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int err;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	ohci_pmac_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;

	ohci->iso_cycle_timer_quirk = dev->vendor == PCI_VENDOR_ID_AL ||
				      dev->vendor == PCI_VENDOR_ID_NEC ||
				      dev->vendor == PCI_VENDOR_ID_VIA;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(ohci);
	ohci_pmac_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}

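/*
 * Remove: unregister from the core first so no new work can be queued, then
 * reset the controller and release the resources acquired in pci_probe(),
 * roughly in reverse order.
 */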
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(ohci);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

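/*
 * Power management: suspend quiesces the controller and saves PCI state;
 * resume restores PCI state and reinitializes the controller through
 * ohci_enable() without passing a new config ROM.
 */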
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

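/* Bind by class code: any controller implementing the OHCI programming
 * interface for IEEE 1394. */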
static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);