2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
31 #include <asm/uaccess.h>
34 #include "../vme_bridge.h"
35 #include "vme_tsi148.h"
/* Forward declarations: PCI driver lifecycle hooks (module init/exit,
 * probe/remove). */
37 static int __init tsi148_init(void);
38 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39 static void tsi148_remove(struct pci_dev *);
40 static void __exit tsi148_exit(void);
/* VME bridge operations: inbound (slave) and outbound (master) window
 * configuration, master read/write/RMW access. */
43 int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
44 unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
45 int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
46 unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
48 int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
49 unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
50 int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
51 unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
52 ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
54 ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
56 unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
57 unsigned int, unsigned int, loff_t);
/* DMA list management and VME bus interrupt generation. */
58 int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
59 struct vme_dma_attr *, size_t);
60 int tsi148_dma_list_exec(struct vme_dma_list *);
61 int tsi148_dma_list_empty(struct vme_dma_list *);
62 int tsi148_generate_irq(int, int);
64 /* Module parameter */
/* Name used both for PCI driver registration and the irq request below. */
68 static char driver_name[] = "vme_tsi148";
/* PCI match table: bind only to the Tundra TSI148 VME bridge device. */
70 static const struct pci_device_id tsi148_ids[] = {
71 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
/* PCI driver glue wiring the probe/remove hooks declared above. */
75 static struct pci_driver tsi148_driver = {
77 .id_table = tsi148_ids,
78 .probe = tsi148_probe,
79 .remove = tsi148_remove,
/*
 * Combine the two 32-bit halves of a TSI148 register pair into a
 * single 64-bit value stored through @variable.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	*variable = ((unsigned long long)high << 32) |
		(unsigned long long)low;
}
/*
 * Split a 64-bit value into the two 32-bit halves used by the TSI148
 * upper/lower register pairs.
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFF);
}
99 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
104 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
105 wake_up(&(bridge->dma_queue[0]));
106 serviced |= TSI148_LCSR_INTC_DMA0C;
108 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
109 wake_up(&(bridge->dma_queue[1]));
110 serviced |= TSI148_LCSR_INTC_DMA1C;
117 * Wake up location monitor queue
119 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
124 for (i = 0; i < 4; i++) {
125 if(stat & TSI148_LCSR_INTS_LMS[i]) {
126 /* We only enable interrupts if the callback is set */
127 bridge->lm_callback[i](i);
128 serviced |= TSI148_LCSR_INTC_LMC[i];
136 * Wake up mail box queue.
138 * XXX This functionality is not exposed up though API.
140 static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
146 for (i = 0; i < 4; i++) {
147 if(stat & TSI148_LCSR_INTS_MBS[i]) {
148 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
149 printk("VME Mailbox %d received: 0x%x\n", i, val);
150 serviced |= TSI148_LCSR_INTC_MBC[i];
158 * Display error & status message when PERR (PCI) exception interrupt occurs.
160 static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
163 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
164 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
165 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
166 ioread32be(bridge->base + TSI148_LCSR_EDPAT)
169 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
170 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
171 ioread32be(bridge->base + TSI148_LCSR_EDPXS)
174 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
176 return TSI148_LCSR_INTC_PERRC;
180 * Save address and status when VME error interrupt occurs.
182 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
184 unsigned int error_addr_high, error_addr_low;
185 unsigned long long error_addr;
187 struct vme_bus_error *error;
188 struct tsi148_driver *bridge;
190 bridge = tsi148_bridge->driver_priv;
192 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
193 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
194 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
196 reg_join(error_addr_high, error_addr_low, &error_addr);
198 /* Check for exception register overflow (we have lost error data) */
199 if(error_attrib & TSI148_LCSR_VEAT_VEOF) {
200 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
203 error = (struct vme_bus_error *)kmalloc(sizeof (struct vme_bus_error),
206 error->address = error_addr;
207 error->attributes = error_attrib;
208 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
211 "Unable to alloc memory for VMEbus Error reporting\n");
213 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
214 error_addr, error_attrib);
218 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
220 return TSI148_LCSR_INTC_VERRC;
224 * Wake up IACK queue.
226 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
228 wake_up(&(bridge->iack_queue));
230 return TSI148_LCSR_INTC_IACKC;
234 * Calling VME bus interrupt callback if provided.
236 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
239 int vec, i, serviced = 0;
240 struct tsi148_driver *bridge;
242 bridge = tsi148_bridge->driver_priv;
244 for (i = 7; i > 0; i--) {
245 if (stat & (1 << i)) {
247 * Note: Even though the registers are defined
248 * as 32-bits in the spec, we only want to issue
249 * 8-bit IACK cycles on the bus, read from offset
252 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
254 vme_irq_handler(tsi148_bridge, i, vec);
256 serviced |= (1 << i);
264 * Top level interrupt handler. Clears appropriate interrupt status bits and
265 * then calls appropriate sub handler(s).
267 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
269 u32 stat, enable, serviced = 0;
270 struct vme_bridge *tsi148_bridge;
271 struct tsi148_driver *bridge;
275 bridge = tsi148_bridge->driver_priv;
277 /* Determine which interrupts are unmasked and set */
278 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
279 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
281 /* Only look at unmasked interrupts */
284 if (unlikely(!stat)) {
288 /* Call subhandlers as appropriate */
290 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
291 serviced |= tsi148_DMA_irqhandler(bridge, stat);
293 /* Location monitor irqs */
294 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
295 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
296 serviced |= tsi148_LM_irqhandler(bridge, stat);
299 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
300 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
301 serviced |= tsi148_MB_irqhandler(bridge, stat);
304 if (stat & TSI148_LCSR_INTS_PERRS)
305 serviced |= tsi148_PERR_irqhandler(bridge);
308 if (stat & TSI148_LCSR_INTS_VERRS)
309 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
312 if (stat & TSI148_LCSR_INTS_IACKS)
313 serviced |= tsi148_IACK_irqhandler(bridge);
316 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
317 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
318 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
319 TSI148_LCSR_INTS_IRQ1S))
320 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
322 /* Clear serviced interrupts */
323 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
328 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
332 struct pci_dev *pdev;
333 struct tsi148_driver *bridge;
335 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
337 bridge = tsi148_bridge->driver_priv;
339 /* Initialise list for VME bus errors */
340 INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
342 mutex_init(&(tsi148_bridge->irq_mtx));
344 result = request_irq(pdev->irq,
347 driver_name, tsi148_bridge);
349 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
354 /* Enable and unmask interrupts */
355 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
356 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
357 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
358 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
359 TSI148_LCSR_INTEO_IACKEO;
361 /* This leaves the following interrupts masked.
362 * TSI148_LCSR_INTEO_VIEEO
363 * TSI148_LCSR_INTEO_SYSFLEO
364 * TSI148_LCSR_INTEO_ACFLEO
367 /* Don't enable Location Monitor interrupts here - they will be
368 * enabled when the location monitors are properly configured and
369 * a callback has been attached.
370 * TSI148_LCSR_INTEO_LM0EO
371 * TSI148_LCSR_INTEO_LM1EO
372 * TSI148_LCSR_INTEO_LM2EO
373 * TSI148_LCSR_INTEO_LM3EO
376 /* Don't enable VME interrupts until we add a handler, else the board
377 * will respond to it and we don't want that unless it knows how to
378 * properly deal with it.
379 * TSI148_LCSR_INTEO_IRQ7EO
380 * TSI148_LCSR_INTEO_IRQ6EO
381 * TSI148_LCSR_INTEO_IRQ5EO
382 * TSI148_LCSR_INTEO_IRQ4EO
383 * TSI148_LCSR_INTEO_IRQ3EO
384 * TSI148_LCSR_INTEO_IRQ2EO
385 * TSI148_LCSR_INTEO_IRQ1EO
388 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
389 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
/*
 * Tear down bridge interrupt handling: mask and clear all interrupt
 * sources, then release the PCI interrupt line.
 *
 * NOTE(review): tsi148_irq_init() registers the handler with
 * dev_id == tsi148_bridge, but free_irq() below is passed pdev.
 * free_irq() requires the same dev_id used at request time, so the
 * handler is not correctly released here. Fixing this needs access to
 * the vme_bridge pointer, which the current signature does not
 * provide -- confirm against the caller and adjust the interface.
 */
394 static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
396 /* Turn off interrupts */
397 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
398 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
400 /* Clear all interrupts */
401 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
403 /* Detach interrupt handler */
404 free_irq(pdev->irq, pdev);
408 * Check to see if an IACk has been received, return true (1) or false (0).
410 int tsi148_iack_received(struct tsi148_driver *bridge)
414 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
416 if (tmp & TSI148_LCSR_VICR_IRQS)
/*
 * Configure VME interrupt enable/mask state for the given level.
 * NOTE(review): the full parameter list and the enable/disable branch
 * structure are not visible in this view; the comments below annotate
 * only the visible register accesses.
 */
423 * Configure VME interrupt
425 void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
428 struct pci_dev *pdev;
430 struct tsi148_driver *bridge;
432 bridge = tsi148_bridge->driver_priv;
434 /* We need to do the ordering differently for enabling and disabling */
/* Disable ordering: clear the level's bit in INTEN first, then INTEO */
436 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
437 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
438 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
440 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
441 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
442 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
/* Wait for any in-flight instance of the PCI irq handler to finish */
445 pdev = container_of(tsi148_bridge->parent,
446 struct pci_dev, dev);
448 synchronize_irq(pdev->irq);
/* Enable ordering: set the level's bit in INTEO first, then INTEN */
451 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
452 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
453 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
455 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
456 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
457 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
462 * Generate a VME bus interrupt at the requested level & vector. Wait for
463 * interrupt to be acked.
465 int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
468 struct tsi148_driver *bridge;
470 bridge = tsi148_bridge->driver_priv;
472 mutex_lock(&(bridge->vme_int));
474 /* Read VICR register */
475 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
478 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
479 (statid & TSI148_LCSR_VICR_STID_M);
480 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
482 /* Assert VMEbus IRQ */
483 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
484 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
486 /* XXX Consider implementing a timeout? */
487 wait_event_interruptible(bridge->iack_queue,
488 tsi148_iack_received(bridge));
490 mutex_unlock(&(bridge->vme_int));
496 * Find the first error in this address range
498 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
499 vme_address_t aspace, unsigned long long address, size_t count)
501 struct list_head *err_pos;
502 struct vme_bus_error *vme_err, *valid = NULL;
503 unsigned long long bound;
505 bound = address + count;
508 * XXX We are currently not looking at the address space when parsing
509 * for errors. This is because parsing the Address Modifier Codes
510 * is going to be quite resource intensive to do properly. We
511 * should be OK just looking at the addresses and this is certainly
512 * much better than what we had before.
515 /* Iterate through errors */
516 list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
517 vme_err = list_entry(err_pos, struct vme_bus_error, list);
518 if((vme_err->address >= address) && (vme_err->address < bound)){
528 * Clear errors in the provided address range.
530 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
531 vme_address_t aspace, unsigned long long address, size_t count)
533 struct list_head *err_pos, *temp;
534 struct vme_bus_error *vme_err;
535 unsigned long long bound;
537 bound = address + count;
540 * XXX We are currently not looking at the address space when parsing
541 * for errors. This is because parsing the Address Modifier Codes
542 * is going to be quite resource intensive to do properly. We
543 * should be OK just looking at the addresses and this is certainly
544 * much better than what we had before.
547 /* Iterate through errors */
548 list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
549 vme_err = list_entry(err_pos, struct vme_bus_error, list);
551 if((vme_err->address >= address) && (vme_err->address < bound)){
/*
 * tsi148_slave_set() - configure an inbound (slave) window so a VME
 * address range is translated to the given PCI base address.
 * NOTE(review): several original lines are absent from this view; the
 * comments below annotate only the visible code.
 */
559 * Initialize a slave window with the requested attributes.
561 int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
562 unsigned long long vme_base, unsigned long long size,
563 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
565 unsigned int i, addr = 0, granularity = 0;
566 unsigned int temp_ctl = 0;
567 unsigned int vme_base_low, vme_base_high;
568 unsigned int vme_bound_low, vme_bound_high;
569 unsigned int pci_offset_low, pci_offset_high;
570 unsigned long long vme_bound, pci_offset;
571 struct tsi148_driver *bridge;
573 bridge = image->parent->driver_priv;
/* Map the requested address space to its ITAT mode bits and the
 * window's alignment granularity. */
580 addr |= TSI148_LCSR_ITAT_AS_A16;
583 granularity = 0x1000;
584 addr |= TSI148_LCSR_ITAT_AS_A24;
587 granularity = 0x10000;
588 addr |= TSI148_LCSR_ITAT_AS_A32;
591 granularity = 0x10000;
592 addr |= TSI148_LCSR_ITAT_AS_A64;
/* NOTE(review): printk() below lacks a KERN_ log level. */
600 printk("Invalid address space\n");
605 /* Convert 64-bit variables to 2x 32-bit variables */
606 reg_split(vme_base, &vme_base_high, &vme_base_low);
609 * Bound address is a valid address for the window, adjust
612 vme_bound = vme_base + size - granularity;
613 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
614 pci_offset = (unsigned long long)pci_base - vme_base;
615 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
/* All three low words must be aligned to the window granularity. */
617 if (vme_base_low & (granularity - 1)) {
618 printk("Invalid VME base alignment\n");
621 if (vme_bound_low & (granularity - 1)) {
622 printk("Invalid VME bound alignment\n");
625 if (pci_offset_low & (granularity - 1)) {
626 printk("Invalid PCI Offset alignment\n");
630 /* Disable while we are mucking around */
631 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
632 TSI148_LCSR_OFFSET_ITAT);
633 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
634 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITAT);
/* Program start/end/offset register pairs for inbound window i. */
638 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
639 TSI148_LCSR_OFFSET_ITSAU);
640 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
641 TSI148_LCSR_OFFSET_ITSAL);
642 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
643 TSI148_LCSR_OFFSET_ITEAU);
644 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
645 TSI148_LCSR_OFFSET_ITEAL);
646 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
647 TSI148_LCSR_OFFSET_ITOFU);
648 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
649 TSI148_LCSR_OFFSET_ITOFL);
651 /* Setup 2eSST speeds */
652 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
653 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
655 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
658 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
661 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
665 /* Setup cycle types */
666 temp_ctl &= ~(0x1F << 7);
668 temp_ctl |= TSI148_LCSR_ITAT_BLT;
669 if (cycle & VME_MBLT)
670 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
671 if (cycle & VME_2eVME)
672 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
673 if (cycle & VME_2eSST)
674 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
675 if (cycle & VME_2eSSTB)
676 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
678 /* Setup address space */
679 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
/* Access privilege / program-data attribute bits. */
683 if (cycle & VME_SUPER)
684 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
685 if (cycle & VME_USER)
686 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
687 if (cycle & VME_PROG)
688 temp_ctl |= TSI148_LCSR_ITAT_PGM;
689 if (cycle & VME_DATA)
690 temp_ctl |= TSI148_LCSR_ITAT_DATA;
692 /* Write ctl reg without enable */
693 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
694 TSI148_LCSR_OFFSET_ITAT);
/* Finally set the enable bit (only if requested) and write again. */
697 temp_ctl |= TSI148_LCSR_ITAT_EN;
699 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
700 TSI148_LCSR_OFFSET_ITAT);
/*
 * tsi148_slave_get() - read back the configuration of an inbound
 * (slave) window from the hardware registers.
 */
706 * Get slave window configuration.
708 int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
709 unsigned long long *vme_base, unsigned long long *size,
710 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
712 unsigned int i, granularity = 0, ctl = 0;
713 unsigned int vme_base_low, vme_base_high;
714 unsigned int vme_bound_low, vme_bound_high;
715 unsigned int pci_offset_low, pci_offset_high;
716 unsigned long long vme_bound, pci_offset;
717 struct tsi148_driver *bridge;
719 bridge = image->parent->driver_priv;
/* Read the attribute register and the start/end/offset pairs for
 * inbound window i. */
724 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
725 TSI148_LCSR_OFFSET_ITAT);
727 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
728 TSI148_LCSR_OFFSET_ITSAU);
729 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
730 TSI148_LCSR_OFFSET_ITSAL);
731 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
732 TSI148_LCSR_OFFSET_ITEAU);
733 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
734 TSI148_LCSR_OFFSET_ITEAL);
735 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
736 TSI148_LCSR_OFFSET_ITOFU);
737 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
738 TSI148_LCSR_OFFSET_ITOFL);
740 /* Convert 64-bit variables to 2x 32-bit variables */
741 reg_join(vme_base_high, vme_base_low, vme_base);
742 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
743 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
/* NOTE(review): vme_base here is a pointer (unsigned long long *), so
 * this casts the POINTER to dma_addr_t rather than the value; it
 * likely should be (dma_addr_t)*vme_base + pci_offset -- confirm. */
745 *pci_base = (dma_addr_t)vme_base + pci_offset;
751 if (ctl & TSI148_LCSR_ITAT_EN)
/* Decode address space and recover the matching granularity. */
754 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
758 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
759 granularity = 0x1000;
762 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
763 granularity = 0x10000;
766 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
767 granularity = 0x10000;
771 /* Need granularity before we set the size */
772 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
/* Decode 2eSST speed selection. */
775 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
776 *cycle |= VME_2eSST160;
777 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
778 *cycle |= VME_2eSST267;
779 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
780 *cycle |= VME_2eSST320;
/* Decode enabled cycle types and access attributes. */
782 if (ctl & TSI148_LCSR_ITAT_BLT)
784 if (ctl & TSI148_LCSR_ITAT_MBLT)
786 if (ctl & TSI148_LCSR_ITAT_2eVME)
788 if (ctl & TSI148_LCSR_ITAT_2eSST)
790 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
791 *cycle |= VME_2eSSTB;
793 if (ctl & TSI148_LCSR_ITAT_SUPR)
795 if (ctl & TSI148_LCSR_ITAT_NPRIV)
797 if (ctl & TSI148_LCSR_ITAT_PGM)
799 if (ctl & TSI148_LCSR_ITAT_DATA)
/*
 * tsi148_alloc_resource() - allocate and ioremap a PCI memory resource
 * backing an outbound window; reuses an existing resource if the size
 * already matches, frees it when size is zero.
 */
806 * Allocate and map PCI Resource
808 static int tsi148_alloc_resource(struct vme_master_resource *image,
809 unsigned long long size)
811 unsigned long long existing_size;
813 struct pci_dev *pdev;
814 struct vme_bridge *tsi148_bridge;
816 tsi148_bridge = image->parent;
818 /* Find pci_dev container of dev */
819 if (tsi148_bridge->parent == NULL) {
/* NOTE(review): printk() lacks a KERN_ log level. */
820 printk("Dev entry NULL\n");
823 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
825 existing_size = (unsigned long long)(image->bus_resource.end -
826 image->bus_resource.start);
828 /* If the existing size is OK, return */
829 if ((size != 0) && (existing_size == (size - 1)))
/* Tear down the previous mapping/resource before re-allocating. */
832 if (existing_size != 0) {
833 iounmap(image->kern_base);
834 image->kern_base = NULL;
/* NOTE(review): kfree(NULL) is a no-op, the NULL check is redundant. */
835 if (image->bus_resource.name != NULL)
836 kfree(image->bus_resource.name);
837 release_resource(&(image->bus_resource));
838 memset(&(image->bus_resource), 0, sizeof(struct resource));
841 /* Exit here if size is zero */
846 if (image->bus_resource.name == NULL) {
847 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
848 if (image->bus_resource.name == NULL) {
849 printk(KERN_ERR "Unable to allocate memory for resource"
/* Resource name is "<bridge name>.<window number>". */
856 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
859 image->bus_resource.start = 0;
860 image->bus_resource.end = (unsigned long)size;
861 image->bus_resource.flags = IORESOURCE_MEM;
863 retval = pci_bus_alloc_resource(pdev->bus,
864 &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
867 printk(KERN_ERR "Failed to allocate mem resource for "
868 "window %d size 0x%lx start 0x%lx\n",
869 image->number, (unsigned long)size,
870 (unsigned long)image->bus_resource.start);
874 image->kern_base = ioremap_nocache(
875 image->bus_resource.start, size);
876 if (image->kern_base == NULL) {
877 printk(KERN_ERR "Failed to remap resource\n");
/* Error unwind path: undo mapping, resource and name allocation. */
884 iounmap(image->kern_base);
885 image->kern_base = NULL;
887 release_resource(&(image->bus_resource));
889 kfree(image->bus_resource.name);
890 memset(&(image->bus_resource), 0, sizeof(struct resource));
896 * Free and unmap PCI Resource
898 static void tsi148_free_resource(struct vme_master_resource *image)
900 iounmap(image->kern_base);
901 image->kern_base = NULL;
902 release_resource(&(image->bus_resource));
903 kfree(image->bus_resource.name);
904 memset(&(image->bus_resource), 0, sizeof(struct resource));
/*
 * tsi148_master_set() - configure an outbound (master) window mapping
 * a PCI resource onto the requested VME base/size with the given
 * address space, cycle types and data width. Protected by the image
 * spinlock; the backing PCI resource is (re)allocated here.
 * NOTE(review): many original lines are absent from this view; the
 * comments below annotate only the visible code.
 */
908 * Set the attributes of an outbound window.
910 int tsi148_master_set( struct vme_master_resource *image, int enabled,
911 unsigned long long vme_base, unsigned long long size,
912 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
916 unsigned int temp_ctl = 0;
917 unsigned int pci_base_low, pci_base_high;
918 unsigned int pci_bound_low, pci_bound_high;
919 unsigned int vme_offset_low, vme_offset_high;
920 unsigned long long pci_bound, vme_offset, pci_base;
921 struct tsi148_driver *bridge;
923 bridge = image->parent->driver_priv;
925 /* Verify input data */
926 if (vme_base & 0xFFFF) {
927 printk(KERN_ERR "Invalid VME Window alignment\n");
932 if ((size == 0) && (enabled != 0)) {
933 printk(KERN_ERR "Size must be non-zero for enabled windows\n");
938 spin_lock(&(image->lock));
940 /* Let's allocate the resource here rather than further up the stack as
941 * it avoids pushing loads of bus dependant stuff up the stack. If size
942 * is zero, any existing resource will be freed.
944 retval = tsi148_alloc_resource(image, size);
946 spin_unlock(&(image->lock));
947 printk(KERN_ERR "Unable to allocate memory for "
/* PCI base comes from the resource just allocated. */
957 pci_base = (unsigned long long)image->bus_resource.start;
960 * Bound address is a valid address for the window, adjust
961 * according to window granularity.
963 pci_bound = pci_base + (size - 0x10000);
964 vme_offset = vme_base - pci_base;
967 /* Convert 64-bit variables to 2x 32-bit variables */
968 reg_split(pci_base, &pci_base_high, &pci_base_low);
969 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
970 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
/* All low words must be 64KB aligned (outbound window granularity). */
972 if (pci_base_low & 0xFFFF) {
973 spin_unlock(&(image->lock));
974 printk(KERN_ERR "Invalid PCI base alignment\n");
978 if (pci_bound_low & 0xFFFF) {
979 spin_unlock(&(image->lock));
980 printk(KERN_ERR "Invalid PCI bound alignment\n");
984 if (vme_offset_low & 0xFFFF) {
985 spin_unlock(&(image->lock));
986 printk(KERN_ERR "Invalid VME Offset alignment\n");
993 /* Disable while we are mucking around */
994 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
995 TSI148_LCSR_OFFSET_OTAT);
996 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
997 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
998 TSI148_LCSR_OFFSET_OTAT);
1000 /* Setup 2eSST speeds */
1001 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1002 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1004 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1007 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1010 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1014 /* Setup cycle types */
1015 if (cycle & VME_BLT) {
1016 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1017 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1019 if (cycle & VME_MBLT) {
1020 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1021 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1023 if (cycle & VME_2eVME) {
1024 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1025 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1027 if (cycle & VME_2eSST) {
1028 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1029 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1031 if (cycle & VME_2eSSTB) {
1032 printk(KERN_WARNING "Currently not setting Broadcast Select "
1034 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1035 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1038 /* Setup data width */
1039 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1042 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1045 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1048 spin_unlock(&(image->lock));
1049 printk(KERN_ERR "Invalid data width\n");
1054 /* Setup address space */
1055 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1058 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1061 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1064 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1067 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1070 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1073 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1076 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1079 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1082 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1085 spin_unlock(&(image->lock));
1086 printk(KERN_ERR "Invalid address space\n");
/* Clear then set supervisor/program attribute bits. */
1092 temp_ctl &= ~(3<<4);
1093 if (cycle & VME_SUPER)
1094 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1095 if (cycle & VME_PROG)
1096 temp_ctl |= TSI148_LCSR_OTAT_PGM;
/* Program start/end/offset register pairs for outbound window i. */
1099 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1100 TSI148_LCSR_OFFSET_OTSAU);
1101 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1102 TSI148_LCSR_OFFSET_OTSAL);
1103 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1104 TSI148_LCSR_OFFSET_OTEAU);
1105 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1106 TSI148_LCSR_OFFSET_OTEAL);
1107 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1108 TSI148_LCSR_OFFSET_OTOFU);
1109 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1110 TSI148_LCSR_OFFSET_OTOFL);
1112 /* Write ctl reg without enable */
1113 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1114 TSI148_LCSR_OFFSET_OTAT);
/* Set the enable bit (only if requested) and write the final value. */
1117 temp_ctl |= TSI148_LCSR_OTAT_EN;
1119 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1120 TSI148_LCSR_OFFSET_OTAT);
1122 spin_unlock(&(image->lock));
/* Error path: release the PCI resource allocated above. */
1128 tsi148_free_resource(image);
/*
 * __tsi148_master_get() - read back an outbound window's configuration
 * from the hardware. Lock-free variant; callers must hold image->lock
 * (tsi148_master_get() wraps this with the lock held).
 */
1136 * Set the attributes of an outbound window.
1138 * XXX Not parsing prefetch information.
1140 int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1141 unsigned long long *vme_base, unsigned long long *size,
1142 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1144 unsigned int i, ctl;
1145 unsigned int pci_base_low, pci_base_high;
1146 unsigned int pci_bound_low, pci_bound_high;
1147 unsigned int vme_offset_low, vme_offset_high;
1149 unsigned long long pci_base, pci_bound, vme_offset;
1150 struct tsi148_driver *bridge;
1152 bridge = image->parent->driver_priv;
/* Read attribute and start/end/offset register pairs for window i. */
1156 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1157 TSI148_LCSR_OFFSET_OTAT);
1159 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1160 TSI148_LCSR_OFFSET_OTSAU);
1161 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1162 TSI148_LCSR_OFFSET_OTSAL);
1163 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1164 TSI148_LCSR_OFFSET_OTEAU);
1165 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1166 TSI148_LCSR_OFFSET_OTEAL);
1167 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1168 TSI148_LCSR_OFFSET_OTOFU);
1169 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1170 TSI148_LCSR_OFFSET_OTOFL);
1172 /* Convert 64-bit variables to 2x 32-bit variables */
1173 reg_join(pci_base_high, pci_base_low, &pci_base);
1174 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1175 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
/* Reconstruct the VME base and size (0x10000 = window granularity). */
1177 *vme_base = pci_base + vme_offset;
1178 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1185 if (ctl & TSI148_LCSR_OTAT_EN)
1188 /* Setup address space */
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1197 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1198 *aspace |= VME_CRCSR;
1199 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1200 *aspace |= VME_USER1;
1201 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1202 *aspace |= VME_USER2;
1203 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1204 *aspace |= VME_USER3;
1205 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1206 *aspace |= VME_USER4;
1208 /* Setup 2eSST speeds */
1209 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1210 *cycle |= VME_2eSST160;
1211 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1212 *cycle |= VME_2eSST267;
1213 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1214 *cycle |= VME_2eSST320;
1216 /* Setup cycle types */
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
1219 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
1221 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
1223 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
1224 *cycle |= VME_2eVME;
1225 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
1226 *cycle |= VME_2eSST;
1227 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
1228 *cycle |= VME_2eSSTB;
1230 if (ctl & TSI148_LCSR_OTAT_SUP)
1231 *cycle |= VME_SUPER;
1235 if (ctl & TSI148_LCSR_OTAT_PGM)
1240 /* Setup data width */
1241 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1243 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
/*
 * tsi148_master_get - report an outbound (master) window's configuration.
 *
 * Takes the image spinlock so the register read-back in
 * __tsi148_master_get() cannot race a concurrent tsi148_master_set().
 *
 * NOTE(review): this listing has lines elided (local declarations, braces
 * and the return statement are missing); code kept verbatim.
 */
1250 int tsi148_master_get( struct vme_master_resource *image, int *enabled,
1251 unsigned long long *vme_base, unsigned long long *size,
1252 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1256 spin_lock(&(image->lock));
1258 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1261 spin_unlock(&(image->lock));
/*
 * tsi148_master_read - read from the VME bus through a master window.
 *
 * Copies @count bytes from the mapped window at @offset into @buf, then
 * consults the bridge's saved-bus-error list for the window's address
 * space/range.  If a fault was latched, the return value becomes the
 * number of bytes successfully read before the faulting address, and
 * the saved errors for that range are cleared.
 *
 * NOTE(review): this listing has lines elided (declarations of cycle/
 * dwidth, braces, final return); code kept verbatim.
 */
1266 ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1267 size_t count, loff_t offset)
1269 int retval, enabled;
1270 unsigned long long vme_base, size;
1271 vme_address_t aspace;
1274 struct vme_bus_error *vme_err = NULL;
1275 struct vme_bridge *tsi148_bridge;
1277 tsi148_bridge = image->parent;
1279 spin_lock(&(image->lock));
1281 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
/* Re-read window config to learn which VME address range was accessed */
1287 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1290 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1292 if(vme_err != NULL) {
1293 dev_err(image->parent->parent, "First VME read error detected "
1294 "an at address 0x%llx\n", vme_err->address);
/* Bytes transferred before the first latched fault */
1295 retval = vme_err->address - (vme_base + offset);
1296 /* Clear down save errors in this address range */
1297 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1302 spin_unlock(&(image->lock));
/*
 * tsi148_master_write - write to the VME bus through a master window.
 *
 * Copies @count bytes from @buf to the mapped window at @offset.  VME
 * writes are posted, so a dummy ioread16() from the device's own
 * registers (via the CR/CSR flush window) is used to flush them before
 * the saved-bus-error list is checked.  On a latched fault, returns the
 * number of bytes accepted before the faulting address.
 *
 * NOTE(review): this listing has lines elided (cycle/dwidth declarations,
 * braces, the printk's address argument line, final return); code kept
 * verbatim.  The printk also lacks a KERN_ level.
 */
1308 ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1309 size_t count, loff_t offset)
1311 int retval = 0, enabled;
1312 unsigned long long vme_base, size;
1313 vme_address_t aspace;
1317 struct vme_bus_error *vme_err = NULL;
1318 struct vme_bridge *tsi148_bridge;
1319 struct tsi148_driver *bridge;
1321 tsi148_bridge = image->parent;
1323 bridge = tsi148_bridge->driver_priv;
1325 spin_lock(&(image->lock));
1327 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1331 * Writes are posted. We need to do a read on the VME bus to flush out
1332 * all of the writes before we check for errors. We can't guarantee
1333 * that reading the data we have just written is safe. It is believed
1334 * that there isn't any read, write re-ordering, so we can read any
1335 * location in VME space, so lets read the Device ID from the tsi148's
1336 * own registers as mapped into CR/CSR space.
1338 * We check for saved errors in the written address range/space.
1345 * Get window info first, to maximise the time that the buffers may
1346 * flush on their own
1348 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
/* Flush posted writes: read own CR/CSR registers through flush window */
1351 ioread16(bridge->flush_image->kern_base + 0x7F000);
1353 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1355 if(vme_err != NULL) {
1356 printk("First VME write error detected an at address 0x%llx\n",
1358 retval = vme_err->address - (vme_base + offset);
1359 /* Clear down save errors in this address range */
1360 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1365 spin_unlock(&(image->lock));
1371 * Perform an RMW cycle on the VME bus.
1373 * Requires a previously configured master window, returns final value.
/*
 * tsi148_master_rmw - hardware read-modify-write cycle on the VME bus.
 *
 * Programs the bridge's single set of RMW registers (mask / compare /
 * swap / target PCI address) then triggers the cycle with a read of the
 * mapped window; returns the value read.  The vme_rmw mutex serialises
 * users because only one RMW register set exists; the image spinlock
 * additionally protects the window itself.
 *
 * NOTE(review): this listing has lines elided (declarations of i/tmp/
 * result, braces, final return); code kept verbatim.  The translation
 * registers are read with index `i` — presumably image->number, declared
 * in an elided line; TODO confirm.
 */
1375 unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1376 unsigned int mask, unsigned int compare, unsigned int swap,
1379 unsigned long long pci_addr;
1380 unsigned int pci_addr_high, pci_addr_low;
1383 struct tsi148_driver *bridge;
1385 bridge = image->parent->driver_priv;
1387 /* Find the PCI address that maps to the desired VME address */
1390 /* Locking as we can only do one of these at a time */
1391 mutex_lock(&(bridge->vme_rmw));
1394 spin_lock(&(image->lock));
1396 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1397 TSI148_LCSR_OFFSET_OTSAU);
1398 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1399 TSI148_LCSR_OFFSET_OTSAL);
1401 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1402 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1404 /* Configure registers */
1405 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1406 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1407 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1408 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1409 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
/* Enable RMW mode in the VME master control register */
1412 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1413 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1414 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1416 /* Kick process off with a read to the required address. */
1417 result = ioread32be(image->kern_base + offset);
/* Disable RMW mode again so ordinary reads behave normally */
1420 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1421 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1422 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1424 spin_unlock(&(image->lock));
1426 mutex_unlock(&(bridge->vme_rmw));
/*
 * tsi148_dma_set_vme_src_attributes - translate generic VME attributes
 * into TSI148 DMA source-attribute (DSAT) register bits.
 *
 * ORs the 2eSST speed, transfer mode, data width, address space and
 * SUP/PGM cycle bits into @attr.  Returns 0 on success; error returns
 * for invalid width/aspace are in elided lines.
 *
 * NOTE(review): this listing has lines elided (case labels, braces,
 * returns); code kept verbatim.  The printk() calls lack KERN_ levels.
 */
1431 static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1432 vme_cycle_t cycle, vme_width_t dwidth)
1434 /* Setup 2eSST speeds */
1435 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1437 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1440 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1443 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1447 /* Setup cycle types */
1448 if (cycle & VME_SCT) {
1449 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1451 if (cycle & VME_BLT) {
1452 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1454 if (cycle & VME_MBLT) {
1455 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1457 if (cycle & VME_2eVME) {
1458 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1460 if (cycle & VME_2eSST) {
1461 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1463 if (cycle & VME_2eSSTB) {
1464 printk("Currently not setting Broadcast Select Registers\n");
1465 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1468 /* Setup data width */
1471 *attr |= TSI148_LCSR_DSAT_DBW_16;
1474 *attr |= TSI148_LCSR_DSAT_DBW_32;
1477 printk("Invalid data width\n");
1481 /* Setup address space */
1484 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1487 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1490 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1493 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1496 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1499 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1502 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1505 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1508 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1511 printk("Invalid address space\n");
1516 if (cycle & VME_SUPER)
1517 *attr |= TSI148_LCSR_DSAT_SUP;
1518 if (cycle & VME_PROG)
1519 *attr |= TSI148_LCSR_DSAT_PGM;
/*
 * tsi148_dma_set_vme_dest_attributes - translate generic VME attributes
 * into TSI148 DMA destination-attribute (DDAT) register bits.
 *
 * Mirror of tsi148_dma_set_vme_src_attributes() for the destination
 * side of a DMA descriptor.
 *
 * NOTE(review): this listing has lines elided (case labels, braces,
 * returns); code kept verbatim.
 */
1524 static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1525 vme_cycle_t cycle, vme_width_t dwidth)
1527 /* Setup 2eSST speeds */
1528 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1530 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1533 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1536 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1540 /* Setup cycle types */
1541 if (cycle & VME_SCT) {
1542 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1544 if (cycle & VME_BLT) {
1545 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1547 if (cycle & VME_MBLT) {
1548 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1550 if (cycle & VME_2eVME) {
1551 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1553 if (cycle & VME_2eSST) {
1554 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1556 if (cycle & VME_2eSSTB) {
1557 printk("Currently not setting Broadcast Select Registers\n");
1558 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1561 /* Setup data width */
1564 *attr |= TSI148_LCSR_DDAT_DBW_16;
1567 *attr |= TSI148_LCSR_DDAT_DBW_32;
1570 printk("Invalid data width\n");
1574 /* Setup address space */
1577 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1580 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1583 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1586 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1589 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1592 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1595 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1598 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1601 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1604 printk("Invalid address space\n");
1609 if (cycle & VME_SUPER)
1610 *attr |= TSI148_LCSR_DDAT_SUP;
1611 if (cycle & VME_PROG)
1612 *attr |= TSI148_LCSR_DDAT_PGM;
1618 * Add a link list descriptor to the list
/*
 * tsi148_dma_list_add - append one hardware link-list descriptor.
 *
 * Allocates a tsi148_dma_entry, fills its descriptor from the generic
 * @src/@dest attributes (pattern, PCI or VME), marks it as the last
 * link, appends it to @list and patches the previous descriptor's
 * "next address" to point at it.
 *
 * NOTE(review): this listing has lines elided (case labels, braces,
 * error returns); code kept verbatim.  The descriptor must be 64-bit
 * aligned; kmalloc() alignment is assumed and the check at 1642 rejects
 * a misaligned allocation.  virt_to_bus() is the legacy API — upstream
 * later moved to the DMA-mapping API; TODO confirm against current tree.
 */
1620 int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1621 struct vme_dma_attr *dest, size_t count)
1623 struct tsi148_dma_entry *entry, *prev;
1624 u32 address_high, address_low;
1625 struct vme_dma_pattern *pattern_attr;
1626 struct vme_dma_pci *pci_attr;
1627 struct vme_dma_vme *vme_attr;
1628 dma_addr_t desc_ptr;
1631 /* Descriptor must be aligned on 64-bit boundaries */
1632 entry = (struct tsi148_dma_entry *)kmalloc(
1633 sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1634 if (entry == NULL) {
1635 printk("Failed to allocate memory for dma resource "
1641 /* Test descriptor alignment */
1642 if ((unsigned long)&(entry->descriptor) & 0x7) {
1643 printk("Descriptor not aligned to 8 byte boundary as "
1644 "required: %p\n", &(entry->descriptor));
1649 /* Given we are going to fill out the structure, we probably don't
1650 * need to zero it, but better safe than sorry for now.
1652 memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1654 /* Fill out source part */
1655 switch (src->type) {
1656 case VME_DMA_PATTERN:
1657 pattern_attr = (struct vme_dma_pattern *)src->private;
1659 entry->descriptor.dsal = pattern_attr->pattern;
1660 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1661 /* Default behaviour is 32 bit pattern */
1662 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1663 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1665 /* It seems that the default behaviour is to increment */
1666 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1667 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
/* PCI source: split 64-bit bus address into high/low register halves */
1671 pci_attr = (struct vme_dma_pci *)src->private;
1673 reg_split((unsigned long long)pci_attr->address, &address_high,
1675 entry->descriptor.dsau = address_high;
1676 entry->descriptor.dsal = address_low;
1677 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
/* VME source: address plus translated cycle/aspace/width attributes */
1680 vme_attr = (struct vme_dma_vme *)src->private;
1682 reg_split((unsigned long long)vme_attr->address, &address_high,
1684 entry->descriptor.dsau = address_high;
1685 entry->descriptor.dsal = address_low;
1686 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1688 retval = tsi148_dma_set_vme_src_attributes(
1689 &(entry->descriptor.dsat), vme_attr->aspace,
1690 vme_attr->cycle, vme_attr->dwidth);
1695 printk("Invalid source type\n");
1701 /* Assume last link - this will be over-written by adding another */
1702 entry->descriptor.dnlau = 0;
1703 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1706 /* Fill out destination part */
1707 switch (dest->type) {
1709 pci_attr = (struct vme_dma_pci *)dest->private;
1711 reg_split((unsigned long long)pci_attr->address, &address_high,
1713 entry->descriptor.ddau = address_high;
1714 entry->descriptor.ddal = address_low;
1715 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1718 vme_attr = (struct vme_dma_vme *)dest->private;
1720 reg_split((unsigned long long)vme_attr->address, &address_high,
1722 entry->descriptor.ddau = address_high;
1723 entry->descriptor.ddal = address_low;
1724 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1726 retval = tsi148_dma_set_vme_dest_attributes(
1727 &(entry->descriptor.ddat), vme_attr->aspace,
1728 vme_attr->cycle, vme_attr->dwidth);
1733 printk("Invalid destination type\n");
1739 /* Fill out count */
1740 entry->descriptor.dcnt = (u32)count;
1743 list_add_tail(&(entry->list), &(list->entries));
1745 /* Fill out previous descriptors "Next Address" */
1746 if(entry->list.prev != &(list->entries)){
1747 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1749 /* We need the bus address for the pointer */
1750 desc_ptr = virt_to_bus(&(entry->descriptor));
1751 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1752 &(prev->descriptor.dnlal));
1766 * Check to see if the provided DMA channel is busy.
/*
 * tsi148_dma_busy - poll a DMA channel's status register.
 *
 * Reads DSTA for @channel; used as the wake-up condition in
 * tsi148_dma_list_exec()'s wait_event_interruptible().
 *
 * NOTE(review): the return statements are elided in this listing —
 * presumably 0 while TSI148_LCSR_DSTA_BSY is set and nonzero once the
 * channel is idle (i.e. it answers "done?", not "busy?"); TODO confirm.
 */
1768 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1771 struct tsi148_driver *bridge;
1773 bridge = tsi148_bridge->driver_priv;
1775 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1776 TSI148_LCSR_OFFSET_DSTA);
1778 if (tmp & TSI148_LCSR_DSTA_BSY)
1786 * Execute a previously generated link list
1788 * XXX Need to provide control register configuration.
/*
 * tsi148_dma_list_exec - run a previously built DMA link list.
 *
 * Refuses to start if the controller already has a running list
 * (pending-queue support is not implemented).  Writes the bus address
 * of the first descriptor into DNLAU/DNLAL, sets DGO in DCTL to start,
 * then sleeps interruptibly until tsi148_dma_busy() reports the channel
 * done.  DSTA is checked for a VME bus error (DSTA_VBE) afterwards.
 *
 * NOTE(review): this listing has lines elided (braces, early-return for
 * the busy case, final return); code kept verbatim.  The ctrlr mutex is
 * dropped before the registers are written — it only guards the
 * running/pending lists, not the hardware.
 */
1790 int tsi148_dma_list_exec(struct vme_dma_list *list)
1792 struct vme_dma_resource *ctrlr;
1793 int channel, retval = 0;
1794 struct tsi148_dma_entry *entry;
1795 dma_addr_t bus_addr;
1796 u32 bus_addr_high, bus_addr_low;
1797 u32 val, dctlreg = 0;
1798 struct tsi148_driver *bridge;
1800 ctrlr = list->parent;
1802 bridge = ctrlr->parent->driver_priv;
1804 mutex_lock(&(ctrlr->mtx));
1806 channel = ctrlr->number;
1808 if (! list_empty(&(ctrlr->running))) {
1810 * XXX We have an active DMA transfer and currently haven't
1811 * sorted out the mechanism for "pending" DMA transfers.
1814 /* Need to add to pending here */
1815 mutex_unlock(&(ctrlr->mtx));
1818 list_add(&(list->list), &(ctrlr->running));
1821 /* Get first bus address and write into registers */
1822 entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1825 bus_addr = virt_to_bus(&(entry->descriptor));
1827 mutex_unlock(&(ctrlr->mtx));
1829 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1831 iowrite32be(bus_addr_high, bridge->base +
1832 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1833 iowrite32be(bus_addr_low, bridge->base +
1834 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1836 /* Start the operation */
1837 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1838 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1840 wait_event_interruptible(bridge->dma_queue[channel],
1841 tsi148_dma_busy(ctrlr->parent, channel));
1843 * Read status register, this register is valid until we kick off a
1846 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1847 TSI148_LCSR_OFFSET_DSTA);
1849 if (val & TSI148_LCSR_DSTA_VBE) {
1850 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1854 /* Remove list from running list */
1855 mutex_lock(&(ctrlr->mtx));
1856 list_del(&(list->list));
1857 mutex_unlock(&(ctrlr->mtx));
1863 * Clean up a previously generated link list
1865 * We have a separate function, don't assume that the chain can't be reused.
/*
 * tsi148_dma_list_empty - free every descriptor entry on a DMA list.
 *
 * Walks list->entries with the _safe iterator because each entry is
 * detached and freed during the walk (the free/list_del lines are
 * elided in this listing); the list head itself stays usable so the
 * chain may be rebuilt.
 */
1867 int tsi148_dma_list_empty(struct vme_dma_list *list)
1869 struct list_head *pos, *temp;
1870 struct tsi148_dma_entry *entry;
1872 /* detach and free each entry */
1873 list_for_each_safe(pos, temp, &(list->entries)) {
1875 entry = list_entry(pos, struct tsi148_dma_entry, list);
1883 * All 4 location monitors reside at the same base - this is therefore a
1884 * system wide configuration.
1886 * This does not enable the LM monitor - that should be done when the first
1887 * callback is attached and disabled when the last callback is removed.
/*
 * tsi148_lm_set - configure the (single, chip-wide) location monitor.
 *
 * Rejects reconfiguration while any callback is attached, builds the
 * LMAT control word from @aspace and @cycle, then programs LMBAU/LMBAL
 * with the split 64-bit base.  Does NOT set the enable bit — that is
 * managed by tsi148_lm_attach()/tsi148_lm_detach().
 *
 * NOTE(review): this listing has lines elided (declaration of i, switch
 * case labels, braces, returns); code kept verbatim.
 */
1889 int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1890 vme_address_t aspace, vme_cycle_t cycle)
1892 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1894 struct tsi148_driver *bridge;
1896 bridge = lm->parent->driver_priv;
1898 mutex_lock(&(lm->mtx));
1900 /* If we already have a callback attached, we can't move it! */
1901 for (i = 0; i < lm->monitors; i++) {
1902 if (bridge->lm_callback[i] != NULL) {
1903 mutex_unlock(&(lm->mtx));
1904 printk("Location monitor callback attached, can't "
/* Address-space selection (switch cases elided in listing) */
1912 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1915 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1918 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1921 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1924 mutex_unlock(&(lm->mtx));
1925 printk("Invalid address space\n");
1930 if (cycle & VME_SUPER)
1931 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
1932 if (cycle & VME_USER)
1933 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1934 if (cycle & VME_PROG)
1935 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1936 if (cycle & VME_DATA)
1937 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1939 reg_split(lm_base, &lm_base_high, &lm_base_low);
1941 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1942 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1943 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1945 mutex_unlock(&(lm->mtx));
1950 /* Get configuration of the callback monitor and return whether it is enabled
/*
 * tsi148_lm_get - read back the location monitor configuration.
 *
 * Reconstructs the 64-bit base from LMBAU/LMBAL and decodes LMAT into
 * the generic @aspace/@cycle flags.  The `enabled` flag mirrors
 * TSI148_LCSR_LMAT_EN (the return of `enabled` is in elided lines).
 *
 * NOTE(review): this listing has lines elided (*aspace assignments,
 * braces, return); code kept verbatim.
 */
1953 int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
1954 vme_address_t *aspace, vme_cycle_t *cycle)
1956 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1957 struct tsi148_driver *bridge;
1959 bridge = lm->parent->driver_priv;
1961 mutex_lock(&(lm->mtx));
1963 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1964 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1965 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1967 reg_join(lm_base_high, lm_base_low, lm_base);
1969 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1972 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
1975 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
1978 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
1981 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
1985 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1986 *cycle |= VME_SUPER;
1987 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1989 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
1991 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
1994 mutex_unlock(&(lm->mtx));
2000 * Attach a callback to a specific location monitor.
2002 * Callback will be passed the monitor triggered.
/*
 * tsi148_lm_attach - attach a callback to one of the four monitors.
 *
 * Requires the monitor to be configured (LMAT has PGM or DATA set) and
 * no callback already attached to @monitor.  Stores the callback,
 * enables + routes the per-monitor interrupt (INTEN/INTEO) and, if
 * this is the first user, sets the global LMAT enable bit.
 *
 * NOTE(review): this listing has lines elided (declarations of lm_ctl/
 * tmp, braces, returns); code kept verbatim.
 */
2004 int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2005 void (*callback)(int))
2008 struct tsi148_driver *bridge;
2010 bridge = lm->parent->driver_priv;
2012 mutex_lock(&(lm->mtx));
2014 /* Ensure that the location monitor is configured - need PGM or DATA */
2015 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2016 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2017 mutex_unlock(&(lm->mtx));
2018 printk("Location monitor not properly configured\n");
2022 /* Check that a callback isn't already attached */
2023 if (bridge->lm_callback[monitor] != NULL) {
2024 mutex_unlock(&(lm->mtx));
2025 printk("Existing callback attached\n");
2029 /* Attach callback */
2030 bridge->lm_callback[monitor] = callback;
2032 /* Enable Location Monitor interrupt */
2033 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2034 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2035 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2037 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2038 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2039 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2041 /* Ensure that global Location Monitor Enable set */
2042 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2043 lm_ctl |= TSI148_LCSR_LMAT_EN;
2044 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2047 mutex_unlock(&(lm->mtx));
2053 * Detach a callback function from a specific location monitor.
/*
 * tsi148_lm_detach - detach a callback from one monitor.
 *
 * Disables the per-monitor interrupt (INTEN/INTEO), acknowledges any
 * latched interrupt via INTC, clears the callback pointer, and drops
 * the global LMAT enable once no monitor interrupts remain enabled.
 *
 * NOTE(review): the "all disabled" test masks the INTEN value with the
 * TSI148_LCSR_INTS_LM?S status-bit constants — this relies on the
 * enable and status bits sharing bit positions; TODO confirm against
 * the TSI148 manual.  Declarations/braces/return elided in listing.
 */
2055 int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2058 struct tsi148_driver *bridge;
2060 bridge = lm->parent->driver_priv;
2062 mutex_lock(&(lm->mtx));
2064 /* Disable Location Monitor and ensure previous interrupts are clear */
2065 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2066 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2067 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2069 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2070 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2071 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
/* Acknowledge any pending interrupt for this monitor */
2073 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2074 bridge->base + TSI148_LCSR_INTC);
2076 /* Detach callback */
2077 bridge->lm_callback[monitor] = NULL;
2079 /* If all location monitors disabled, disable global Location Monitor */
2080 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2081 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2082 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2083 tmp &= ~TSI148_LCSR_LMAT_EN;
2084 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2087 mutex_unlock(&(lm->mtx));
2093 * Determine Geographical Addressing
/*
 * tsi148_slot_get - return the board's geographical (slot) address.
 *
 * Reads VSTAT and masks the geographical-address field.  The `slot`
 * declaration and return statement are elided in this listing.
 */
2095 int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2098 struct tsi148_driver *bridge;
2100 bridge = tsi148_bridge->driver_priv;
2103 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2104 slot = slot & TSI148_LCSR_VSTAT_GA_M;
/* Module init: register the PCI driver (tsi148_probe handles devices). */
2111 static int __init tsi148_init(void)
2113 return pci_register_driver(&tsi148_driver);
2117 * Configure CR/CSR space
2119 * Access to the CR/CSR can be configured at power-up. The location of the
2120 * CR/CSR registers in the CR/CSR address space is determined by the boards
2121 * Auto-ID or Geographic address. This function ensures that the window is
2122 * enabled at an offset consistent with the board's geographic address.
2124 * Each board has a 512kB window, with the highest 4kB being used for the
2125 * board's registers; this means there is a fixed-length 508kB window which must
2126 * be mapped onto PCI memory.
/*
 * tsi148_crcsr_init - configure the inbound CR/CSR window.
 *
 * Allocates a coherent buffer backing the board's 512kB CR/CSR image,
 * programs CROU/CROL with its bus address, verifies CBAR matches the
 * geographical address from VSTAT, enables CR/CSR decoding via CRAT,
 * and (when error-checked writes are in use — the `if (err_chk)` line
 * is elided here) sets up the flush master window over this board's
 * CR/CSR range.
 *
 * NOTE(review): the CRAT test below looks inverted — when
 * TSI148_LCSR_CRAT_EN is ALREADY set it logs "Enabling CR/CSR space"
 * and rewrites the enable bit, while the disabled case apparently only
 * logs "already enabled"; the later upstream fix enables only when the
 * bit is clear.  Cannot be safely patched here because the else/brace
 * lines are elided from this listing.
 */
2128 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2129 struct pci_dev *pdev)
2131 u32 cbar, crat, vstat;
2132 u32 crcsr_bus_high, crcsr_bus_low;
2134 struct tsi148_driver *bridge;
2136 bridge = tsi148_bridge->driver_priv;
2138 /* Allocate mem for CR/CSR image */
2139 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2140 &(bridge->crcsr_bus));
2141 if (bridge->crcsr_kernel == NULL) {
2142 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2147 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2149 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2151 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2152 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2154 /* Ensure that the CR/CSR is configured at the correct offset */
2155 cbar = ioread32be(bridge->base + TSI148_CBAR);
2156 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2158 vstat = tsi148_slot_get(tsi148_bridge);
2160 if (cbar != vstat) {
2162 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2163 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2165 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2167 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
/* NOTE(review): suspected inverted condition — see header comment */
2168 if (crat & TSI148_LCSR_CRAT_EN) {
2169 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2170 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2171 bridge->base + TSI148_LCSR_CRAT);
2173 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2175 /* If we want flushed, error-checked writes, set up a window
2176 * over the CR/CSR registers. We read from here to safely flush
2177 * through VME writes.
2180 retval = tsi148_master_set(bridge->flush_image, 1,
2181 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2184 dev_err(&pdev->dev, "Configuring flush image failed\n");
/*
 * tsi148_crcsr_exit - undo tsi148_crcsr_init().
 *
 * Clears the CRAT enable bit, zeroes the CR/CSR base registers and
 * frees the coherent buffer allocated for the CR/CSR image.
 */
2191 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2192 struct pci_dev *pdev)
2195 struct tsi148_driver *bridge;
2197 bridge = tsi148_bridge->driver_priv;
2199 /* Turn off CR/CSR space */
2200 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2201 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2202 bridge->base + TSI148_LCSR_CRAT);
2205 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2206 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2208 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
/*
 * tsi148_probe - PCI probe: bring up one TSI148 bridge.
 *
 * Allocates the vme_bridge and driver-private structures, enables the
 * PCI device, maps the CRG register block from BAR 0 (verified via the
 * Tundra vendor ID), initialises synchronisation primitives and IRQs,
 * creates the flush window plus the master/slave/DMA/location-monitor
 * resource lists, wires the vme_bridge callbacks, configures CR/CSR
 * space and registers the bridge with the VME core.  The tail is the
 * goto-cleanup error path, unwinding in reverse order of acquisition.
 *
 * NOTE(review): this listing has many lines elided (goto labels, error
 * `if (retval)` checks, braces); code kept verbatim.  kmalloc+memset
 * pairs would today be kzalloc, and the error-path lists are walked
 * with list_for_each while entries are freed — the list_del lines are
 * presumably in the elided code; TODO confirm against upstream.
 */
2212 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2214 int retval, i, master_num;
2216 struct list_head *pos = NULL;
2217 struct vme_bridge *tsi148_bridge;
2218 struct tsi148_driver *tsi148_device;
2219 struct vme_master_resource *master_image;
2220 struct vme_slave_resource *slave_image;
2221 struct vme_dma_resource *dma_ctrlr;
2222 struct vme_lm_resource *lm;
2224 /* If we want to support more than one of each bridge, we need to
2225 * dynamically generate this so we get one per device
2227 tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2229 if (tsi148_bridge == NULL) {
2230 dev_err(&pdev->dev, "Failed to allocate memory for device "
2236 memset(tsi148_bridge, 0, sizeof(struct vme_bridge))
2238 tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2239 if (tsi148_device == NULL) {
2240 dev_err(&pdev->dev, "Failed to allocate memory for device "
2246 memset(tsi148_device, 0, sizeof(struct tsi148_driver));
2248 tsi148_bridge->driver_priv = tsi148_device;
2250 /* Enable the device */
2251 retval = pci_enable_device(pdev);
2253 dev_err(&pdev->dev, "Unable to enable device\n");
2258 retval = pci_request_regions(pdev, driver_name);
2260 dev_err(&pdev->dev, "Unable to reserve resources\n");
2264 /* map registers in BAR 0 */
2265 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2267 if (!tsi148_device->base) {
2268 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2273 /* Check to see if the mapping worked out */
2274 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2275 if (data != PCI_VENDOR_ID_TUNDRA) {
2276 dev_err(&pdev->dev, "CRG region check failed\n");
2281 /* Initialize wait queues & mutual exclusion flags */
2282 init_waitqueue_head(&(tsi148_device->dma_queue[0]));
2283 init_waitqueue_head(&(tsi148_device->dma_queue[1]));
2284 init_waitqueue_head(&(tsi148_device->iack_queue));
2285 mutex_init(&(tsi148_device->vme_int));
2286 mutex_init(&(tsi148_device->vme_rmw));
2288 tsi148_bridge->parent = &(pdev->dev);
2289 strcpy(tsi148_bridge->name, driver_name);
2292 retval = tsi148_irq_init(tsi148_bridge);
2294 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2298 /* If we are going to flush writes, we need to read from the VME bus.
2299 * We need to do this safely, thus we read the devices own CR/CSR
2300 * register. To do this we must set up a window in CR/CSR space and
2301 * hence have one less master window resource available.
2303 master_num = TSI148_MAX_MASTER;
2307 tsi148_device->flush_image = (struct vme_master_resource *)
2308 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2309 if (tsi148_device->flush_image == NULL) {
2310 dev_err(&pdev->dev, "Failed to allocate memory for "
2311 "flush resource structure\n");
2315 tsi148_device->flush_image->parent = tsi148_bridge;
2316 spin_lock_init(&(tsi148_device->flush_image->lock));
2317 tsi148_device->flush_image->locked = 1;
2318 tsi148_device->flush_image->number = master_num;
2319 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2321 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2322 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2323 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2324 VME_USER | VME_PROG | VME_DATA;
2325 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2326 memset(&(tsi148_device->flush_image->bus_resource), 0,
2327 sizeof(struct resource));
2328 tsi148_device->flush_image->kern_base = NULL;
2331 /* Add master windows to list */
2332 INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2333 for (i = 0; i < master_num; i++) {
2334 master_image = (struct vme_master_resource *)kmalloc(
2335 sizeof(struct vme_master_resource), GFP_KERNEL);
2336 if (master_image == NULL) {
2337 dev_err(&pdev->dev, "Failed to allocate memory for "
2338 "master resource structure\n");
2342 master_image->parent = tsi148_bridge;
2343 spin_lock_init(&(master_image->lock));
2344 master_image->locked = 0;
2345 master_image->number = i;
2346 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2348 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2349 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2350 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2351 VME_PROG | VME_DATA;
2352 master_image->width_attr = VME_D16 | VME_D32;
2353 memset(&(master_image->bus_resource), 0,
2354 sizeof(struct resource));
2355 master_image->kern_base = NULL;
2356 list_add_tail(&(master_image->list),
2357 &(tsi148_bridge->master_resources));
2360 /* Add slave windows to list */
2361 INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2362 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2363 slave_image = (struct vme_slave_resource *)kmalloc(
2364 sizeof(struct vme_slave_resource), GFP_KERNEL);
2365 if (slave_image == NULL) {
2366 dev_err(&pdev->dev, "Failed to allocate memory for "
2367 "slave resource structure\n");
2371 slave_image->parent = tsi148_bridge;
2372 mutex_init(&(slave_image->mtx));
2373 slave_image->locked = 0;
2374 slave_image->number = i;
2375 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2376 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2377 VME_USER3 | VME_USER4;
2378 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2379 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2380 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2381 VME_PROG | VME_DATA;
2382 list_add_tail(&(slave_image->list),
2383 &(tsi148_bridge->slave_resources));
2386 /* Add dma engines to list */
2387 INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2388 for (i = 0; i < TSI148_MAX_DMA; i++) {
2389 dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2390 sizeof(struct vme_dma_resource), GFP_KERNEL);
2391 if (dma_ctrlr == NULL) {
2392 dev_err(&pdev->dev, "Failed to allocate memory for "
2393 "dma resource structure\n");
2397 dma_ctrlr->parent = tsi148_bridge;
2398 mutex_init(&(dma_ctrlr->mtx));
2399 dma_ctrlr->locked = 0;
2400 dma_ctrlr->number = i;
2401 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2402 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2403 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2404 VME_DMA_PATTERN_TO_MEM;
2405 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2406 INIT_LIST_HEAD(&(dma_ctrlr->running));
2407 list_add_tail(&(dma_ctrlr->list),
2408 &(tsi148_bridge->dma_resources));
2411 /* Add location monitor to list */
2412 INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
2413 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2415 dev_err(&pdev->dev, "Failed to allocate memory for "
2416 "location monitor resource structure\n");
2420 lm->parent = tsi148_bridge;
2421 mutex_init(&(lm->mtx));
2425 list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
/* Wire up the generic VME bridge operations */
2427 tsi148_bridge->slave_get = tsi148_slave_get;
2428 tsi148_bridge->slave_set = tsi148_slave_set;
2429 tsi148_bridge->master_get = tsi148_master_get;
2430 tsi148_bridge->master_set = tsi148_master_set;
2431 tsi148_bridge->master_read = tsi148_master_read;
2432 tsi148_bridge->master_write = tsi148_master_write;
2433 tsi148_bridge->master_rmw = tsi148_master_rmw;
2434 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2435 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2436 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2437 tsi148_bridge->irq_set = tsi148_irq_set;
2438 tsi148_bridge->irq_generate = tsi148_irq_generate;
2439 tsi148_bridge->lm_set = tsi148_lm_set;
2440 tsi148_bridge->lm_get = tsi148_lm_get;
2441 tsi148_bridge->lm_attach = tsi148_lm_attach;
2442 tsi148_bridge->lm_detach = tsi148_lm_detach;
2443 tsi148_bridge->slot_get = tsi148_slot_get;
2445 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2446 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2447 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
2449 dev_info(&pdev->dev, "VME geographical address is %d\n",
2450 data & TSI148_LCSR_VSTAT_GA_M);
2452 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2455 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2456 err_chk ? "enabled" : "disabled");
2458 if (tsi148_crcsr_init(tsi148_bridge, pdev))
2459 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2462 retval = vme_register_bridge(tsi148_bridge);
2464 dev_err(&pdev->dev, "Chip Registration failed.\n");
2468 pci_set_drvdata(pdev, tsi148_bridge);
2470 /* Clear VME bus "board fail", and "power-up reset" lines */
2471 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2472 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2473 data |= TSI148_LCSR_VSTAT_CPURST;
2474 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
/* ---- Error path: unwind in reverse order (labels elided in listing) */
2478 vme_unregister_bridge(tsi148_bridge);
2480 tsi148_crcsr_exit(tsi148_bridge, pdev);
2483 /* resources are stored in link list */
2484 list_for_each(pos, &(tsi148_bridge->lm_resources)) {
2485 lm = list_entry(pos, struct vme_lm_resource, list);
2490 /* resources are stored in link list */
2491 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2492 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2497 /* resources are stored in link list */
2498 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2499 slave_image = list_entry(pos, struct vme_slave_resource, list);
2504 /* resources are stored in link list */
2505 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2506 master_image = list_entry(pos, struct vme_master_resource, list);
2508 kfree(master_image);
2511 tsi148_irq_exit(tsi148_device, pdev);
2514 iounmap(tsi148_device->base);
2516 pci_release_regions(pdev);
2518 pci_disable_device(pdev);
2520 kfree(tsi148_device);
2522 kfree(tsi148_bridge);
2528 static void tsi148_remove(struct pci_dev *pdev)
2530 struct list_head *pos = NULL;
2531 struct vme_master_resource *master_image;
2532 struct vme_slave_resource *slave_image;
2533 struct vme_dma_resource *dma_ctrlr;
2535 struct tsi148_driver *bridge;
2536 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2538 bridge = tsi148_bridge->driver_priv;
2541 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2544 * Shutdown all inbound and outbound windows.
2546 for (i = 0; i < 8; i++) {
2547 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2548 TSI148_LCSR_OFFSET_ITAT);
2549 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2550 TSI148_LCSR_OFFSET_OTAT);
2554 * Shutdown Location monitor.
2556 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2561 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2564 * Clear error status.
2566 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2567 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2568 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2571 * Remove VIRQ interrupt (if any)
2573 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2574 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2577 * Map all Interrupts to PCI INTA
2579 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2580 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2582 tsi148_irq_exit(bridge, pdev);
2584 vme_unregister_bridge(tsi148_bridge);
2586 tsi148_crcsr_exit(tsi148_bridge, pdev);
2588 /* resources are stored in link list */
2589 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2590 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2595 /* resources are stored in link list */
2596 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2597 slave_image = list_entry(pos, struct vme_slave_resource, list);
2602 /* resources are stored in link list */
2603 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2604 master_image = list_entry(pos, struct vme_master_resource,
2607 kfree(master_image);
2610 tsi148_irq_exit(bridge, pdev);
2612 iounmap(bridge->base);
2614 pci_release_regions(pdev);
2616 pci_disable_device(pdev);
2618 kfree(tsi148_bridge->driver_priv);
2620 kfree(tsi148_bridge);
2623 static void __exit tsi148_exit(void)
2625 pci_unregister_driver(&tsi148_driver);
2627 printk(KERN_DEBUG "Driver removed.\n");
2630 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2631 module_param(err_chk, bool, 0);
2633 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2634 module_param(geoid, int, 0);
2636 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2637 MODULE_LICENSE("GPL");
2639 module_init(tsi148_init);
2640 module_exit(tsi148_exit);