2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
30 #include <asm/uaccess.h>
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
/* Module init/exit and PCI probe/remove forward declarations. */
36 static int __init tsi148_init(void);
37 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
38 static void tsi148_remove(struct pci_dev *);
39 static void __exit tsi148_exit(void);
/*
 * Prototypes for the bridge operations this driver provides to the VME
 * core: slave/master window configuration, master read/write/RMW, DMA
 * list handling and VMEbus interrupt generation.
 */
42 int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
43 unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
44 int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
47 int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
48 unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
49 int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
50 unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
51 ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
53 ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
55 unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
56 unsigned int, unsigned int, loff_t);
57 int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
58 struct vme_dma_attr *, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list *);
60 int tsi148_dma_list_empty(struct vme_dma_list *);
61 int tsi148_generate_irq(int, int);
63 /* Module parameter */
/* Name used when registering the interrupt handler and PCI resources. */
67 static char driver_name[] = "vme_tsi148";
/* PCI IDs this driver binds to: the Tundra TSI148 VME bridge only. */
69 static const struct pci_device_id tsi148_ids[] = {
70 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
/* PCI driver registration glue; registered from the module init path. */
74 static struct pci_driver tsi148_driver = {
76 .id_table = tsi148_ids,
77 .probe = tsi148_probe,
78 .remove = tsi148_remove,
/*
 * reg_join() - combine two 32-bit register halves into one 64-bit value.
 * @high:     upper 32 bits (e.g. the xxxU half of a TSI148 register pair)
 * @low:      lower 32 bits (the xxxL half)
 * @variable: output; receives ((u64)high << 32) | low
 *
 * The TSI148 exposes 64-bit addresses as pairs of big-endian 32-bit
 * registers; this helper reassembles them.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	*variable = (unsigned long long)high << 32;
	*variable |= (unsigned long long)low;
}
/*
 * reg_split() - split a 64-bit value into two 32-bit register halves.
 * @variable: 64-bit input value
 * @high:     output; upper 32 bits (for the xxxU register)
 * @low:      output; lower 32 bits (for the xxxL register)
 *
 * Inverse of reg_join(); used before programming the TSI148's paired
 * 32-bit address registers.
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*low = (unsigned int)variable & 0xFFFFFFFF;
	*high = (unsigned int)(variable >> 32);
}
/*
 * DMA-done sub-handler: wake the waiter queue for each DMA channel whose
 * status bit is set and collect the matching INTC clear bits into the
 * "serviced" mask returned to the top-level handler.
 */
98 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
103 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
104 wake_up(&(bridge->dma_queue[0]));
105 serviced |= TSI148_LCSR_INTC_DMA0C;
107 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
108 wake_up(&(bridge->dma_queue[1]));
109 serviced |= TSI148_LCSR_INTC_DMA1C;
116 * Wake up location monitor queue
/*
 * Location-monitor sub-handler: invoke the registered callback for each
 * of the four location monitors whose status bit is set.
 */
118 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
123 for (i = 0; i < 4; i++) {
124 if(stat & TSI148_LCSR_INTS_LMS[i]) {
125 /* We only enable interrupts if the callback is set */
/* No NULL check here: the comment above is the invariant relied upon. */
126 bridge->lm_callback[i](i);
127 serviced |= TSI148_LCSR_INTC_LMC[i];
135 * Wake up mail box queue.
137 * XXX This functionality is not exposed up though API.
/*
 * Mailbox sub-handler: read each flagged GCSR mailbox register (draining
 * the value) and log it, then mark the interrupt for clearing.
 */
139 static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
145 for (i = 0; i < 4; i++) {
146 if(stat & TSI148_LCSR_INTS_MBS[i]) {
147 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
/* NOTE(review): printk lacks a KERN_ log level (e.g. KERN_INFO). */
148 printk("VME Mailbox %d received: 0x%x\n", i, val);
149 serviced |= TSI148_LCSR_INTC_MBC[i];
157 * Display error & status message when PERR (PCI) exception interrupt occurs.
/*
 * PCI exception sub-handler: dump the error diagnostic registers
 * (address, attributes, PCI-X info), clear the exception by writing
 * EDPCL to EDPAT, and return the PERR clear bit for the INTC write.
 */
159 static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
162 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
163 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
164 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
165 ioread32be(bridge->base + TSI148_LCSR_EDPAT)
168 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
169 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
170 ioread32be(bridge->base + TSI148_LCSR_EDPXS)
/* Writing EDPCL clears the latched PCI error so a new one can be captured. */
173 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
175 return TSI148_LCSR_INTC_PERRC;
179 * Save address and status when VME error interrupt occurs.
/*
 * VME error sub-handler: capture the faulting address/attributes from
 * VEAU/VEAL/VEAT, queue them on the bridge's vme_errors list so that
 * master read/write paths can report them, then clear the latch.
 */
181 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
183 unsigned int error_addr_high, error_addr_low;
184 unsigned long long error_addr;
186 struct vme_bus_error *error;
187 struct tsi148_driver *bridge;
189 bridge = tsi148_bridge->driver_priv;
191 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
192 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
193 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
195 reg_join(error_addr_high, error_addr_low, &error_addr);
197 /* Check for exception register overflow (we have lost error data) */
198 if(error_attrib & TSI148_LCSR_VEAT_VEOF) {
199 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
/*
 * NOTE(review): the cast on kmalloc() is redundant in C. This runs in
 * interrupt context, so the allocation flag (not visible here) must be
 * GFP_ATOMIC - TODO confirm.
 */
202 error = (struct vme_bus_error *)kmalloc(sizeof (struct vme_bus_error),
205 error->address = error_addr;
206 error->attributes = error_attrib;
207 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
/* Allocation-failure path: log the error details instead of saving them. */
210 "Unable to alloc memory for VMEbus Error reporting\n");
212 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
213 error_addr, error_attrib);
/* Writing VESCL clears the latched VME exception. */
217 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
219 return TSI148_LCSR_INTC_VERRC;
223 * Wake up IACK queue.
/* IACK sub-handler: wake anyone sleeping in tsi148_irq_generate(). */
225 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
227 wake_up(&(bridge->iack_queue));
229 return TSI148_LCSR_INTC_IACKC;
233 * Calling VME bus interrupt callback if provided.
/*
 * VME IRQ sub-handler: for each pending level (highest priority, 7,
 * first), perform an 8-bit IACK cycle to fetch the status/ID vector and
 * dispatch it to the VME core's handler.
 */
235 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
238 int vec, i, serviced = 0;
239 struct tsi148_driver *bridge;
241 bridge = tsi148_bridge->driver_priv;
243 for (i = 7; i > 0; i--) {
244 if (stat & (1 << i)) {
246 * Note: Even though the registers are defined
247 * as 32-bits in the spec, we only want to issue
248 * 8-bit IACK cycles on the bus, read from offset
/* +3 selects the least-significant byte of the big-endian register. */
251 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
253 vme_irq_handler(tsi148_bridge, i, vec);
255 serviced |= (1 << i);
263 * Top level interrupt handler. Clears appropriate interrupt status bits and
264 * then calls appropriate sub handler(s).
266 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
268 u32 stat, enable, serviced = 0;
269 struct vme_bridge *tsi148_bridge;
270 struct tsi148_driver *bridge;
274 bridge = tsi148_bridge->driver_priv;
276 /* Determine which interrupts are unmasked and set */
277 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
278 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
280 /* Only look at unmasked interrupts */
/* Nothing of ours pending (shared IRQ line) - bail out early. */
283 if (unlikely(!stat)) {
287 /* Call subhandlers as appropriate */
/* DMA channel 0/1 completion */
289 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
290 serviced |= tsi148_DMA_irqhandler(bridge, stat);
292 /* Location monitor irqs */
293 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
294 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
295 serviced |= tsi148_LM_irqhandler(bridge, stat);
/* Mailbox writes from the VME bus */
298 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
299 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
300 serviced |= tsi148_MB_irqhandler(bridge, stat);
/* PCI bus exception */
303 if (stat & TSI148_LCSR_INTS_PERRS)
304 serviced |= tsi148_PERR_irqhandler(bridge);
/* VME bus exception */
307 if (stat & TSI148_LCSR_INTS_VERRS)
308 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
/* Interrupt acknowledge for an IRQ we generated */
311 if (stat & TSI148_LCSR_INTS_IACKS)
312 serviced |= tsi148_IACK_irqhandler(bridge);
/* Incoming VME bus interrupts, levels 1-7 */
315 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
316 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
317 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
318 TSI148_LCSR_INTS_IRQ1S))
319 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
321 /* Clear serviced interrupts */
322 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
/*
 * Register the PCI interrupt handler and enable the bridge's internal
 * interrupt sources (DMA, mailboxes, PCI/VME errors, IACK). Location
 * monitor and VME level interrupts stay masked until configured.
 * Returns 0 on success or a negative errno from request_irq().
 */
327 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
331 struct pci_dev *pdev;
332 struct tsi148_driver *bridge;
334 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
336 bridge = tsi148_bridge->driver_priv;
338 /* Initialise list for VME bus errors */
339 INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
341 mutex_init(&(tsi148_bridge->irq_mtx));
/* dev_id is tsi148_bridge; free_irq() must use the same pointer. */
343 result = request_irq(pdev->irq,
346 driver_name, tsi148_bridge);
348 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
353 /* Enable and unmask interrupts */
354 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
355 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
356 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
357 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
358 TSI148_LCSR_INTEO_IACKEO;
360 /* This leaves the following interrupts masked.
361 * TSI148_LCSR_INTEO_VIEEO
362 * TSI148_LCSR_INTEO_SYSFLEO
363 * TSI148_LCSR_INTEO_ACFLEO
366 /* Don't enable Location Monitor interrupts here - they will be
367 * enabled when the location monitors are properly configured and
368 * a callback has been attached.
369 * TSI148_LCSR_INTEO_LM0EO
370 * TSI148_LCSR_INTEO_LM1EO
371 * TSI148_LCSR_INTEO_LM2EO
372 * TSI148_LCSR_INTEO_LM3EO
375 /* Don't enable VME interrupts until we add a handler, else the board
376 * will respond to it and we don't want that unless it knows how to
377 * properly deal with it.
378 * TSI148_LCSR_INTEO_IRQ7EO
379 * TSI148_LCSR_INTEO_IRQ6EO
380 * TSI148_LCSR_INTEO_IRQ5EO
381 * TSI148_LCSR_INTEO_IRQ4EO
382 * TSI148_LCSR_INTEO_IRQ3EO
383 * TSI148_LCSR_INTEO_IRQ2EO
384 * TSI148_LCSR_INTEO_IRQ1EO
/* INTEO routes interrupts out to PCI; INTEN enables them internally. */
387 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
388 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
/*
 * Mask and clear all bridge interrupts, then release the PCI IRQ.
 */
393 static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
395 /* Turn off interrupts */
396 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
397 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
399 /* Clear all interrupts */
400 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
402 /* Detach interrupt handler */
/*
 * NOTE(review): request_irq() in tsi148_irq_init() registered dev_id =
 * tsi148_bridge, but pdev is passed here. free_irq() matches handlers
 * by dev_id, so this will fail to release the handler - should pass the
 * same vme_bridge pointer used at registration time.
 */
403 free_irq(pdev->irq, pdev);
407 * Check to see if an IACk has been received, return true (1) or false (0).
/* Polls the VICR IRQS bit; used as the wait condition in irq_generate(). */
409 int tsi148_iack_received(struct tsi148_driver *bridge)
413 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
415 if (tmp & TSI148_LCSR_VICR_IRQS)
422 * Configure VME interrupt
/*
 * Enable or disable reception of a VME bus interrupt level. On disable,
 * INTEN is cleared before INTEO and synchronize_irq() is called so no
 * handler is still running for that level when we return. On enable the
 * order is reversed (INTEO before INTEN).
 */
424 void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
427 struct pci_dev *pdev;
429 struct tsi148_driver *bridge;
431 bridge = tsi148_bridge->driver_priv;
433 /* We need to do the ordering differently for enabling and disabling */
/* Disable path: internal enable off first, then the PCI route. */
435 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
436 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
437 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
439 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
440 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
441 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
444 pdev = container_of(tsi148_bridge->parent,
445 struct pci_dev, dev);
/* Wait for any in-flight handler for this IRQ line to finish. */
447 synchronize_irq(pdev->irq);
/* Enable path: route out first, then enable internally. */
450 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
451 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
452 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
454 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
455 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
456 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
461 * Generate a VME bus interrupt at the requested level & vector. Wait for
462 * interrupt to be acked.
464 int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
467 struct tsi148_driver *bridge;
469 bridge = tsi148_bridge->driver_priv;
/* Serialise interrupt generation - only one outstanding IRQ at a time. */
471 mutex_lock(&(bridge->vme_int));
473 /* Read VICR register */
474 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
/* Program the status/ID (vector) the IACK cycle will return. */
477 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
478 (statid & TSI148_LCSR_VICR_STID_M);
479 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
481 /* Assert VMEbus IRQ */
482 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
483 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
485 /* XXX Consider implementing a timeout? */
/* Sleep until tsi148_IACK_irqhandler() wakes us (interruptible). */
486 wait_event_interruptible(bridge->iack_queue,
487 tsi148_iack_received(bridge));
489 mutex_unlock(&(bridge->vme_int));
495 * Find the first error in this address range
/*
 * Scan the bridge's saved VME bus errors for one whose address falls in
 * [address, address + count). Returns the matching entry or NULL.
 * Caller is expected to hold whatever lock protects vme_errors.
 */
497 static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
498 vme_address_t aspace, unsigned long long address, size_t count)
500 struct list_head *err_pos;
501 struct vme_bus_error *vme_err, *valid = NULL;
502 unsigned long long bound;
504 bound = address + count;
507 * XXX We are currently not looking at the address space when parsing
508 * for errors. This is because parsing the Address Modifier Codes
509 * is going to be quite resource intensive to do properly. We
510 * should be OK just looking at the addresses and this is certainly
511 * much better than what we had before.
514 /* Iterate through errors */
515 list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
516 vme_err = list_entry(err_pos, struct vme_bus_error, list);
517 if((vme_err->address >= address) && (vme_err->address < bound)){
527 * Clear errors in the provided address range.
/*
 * Remove (and free) all saved VME bus errors whose address falls in
 * [address, address + count). Uses the _safe iterator because entries
 * are deleted while walking the list.
 */
529 static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
530 vme_address_t aspace, unsigned long long address, size_t count)
532 struct list_head *err_pos, *temp;
533 struct vme_bus_error *vme_err;
534 unsigned long long bound;
536 bound = address + count;
539 * XXX We are currently not looking at the address space when parsing
540 * for errors. This is because parsing the Address Modifier Codes
541 * is going to be quite resource intensive to do properly. We
542 * should be OK just looking at the addresses and this is certainly
543 * much better than what we had before.
546 /* Iterate through errors */
547 list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
548 vme_err = list_entry(err_pos, struct vme_bus_error, list);
550 if((vme_err->address >= address) && (vme_err->address < bound)){
558 * Initialize a slave window with the requested attributes.
/*
 * Program inbound translation window "i" (taken from the image) so VME
 * accesses in [vme_base, vme_base + size) are translated to PCI space at
 * pci_base. aspace selects A16/A24/A32/A64 (which fixes the window
 * granularity); cycle selects transfer modes and access privileges.
 * The window is disabled while registers are reprogrammed and only
 * re-enabled if "enabled" is set.
 */
560 int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
561 unsigned long long vme_base, unsigned long long size,
562 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
564 unsigned int i, addr = 0, granularity = 0;
565 unsigned int temp_ctl = 0;
566 unsigned int vme_base_low, vme_base_high;
567 unsigned int vme_bound_low, vme_bound_high;
568 unsigned int pci_offset_low, pci_offset_high;
569 unsigned long long vme_bound, pci_offset;
570 struct tsi148_driver *bridge;
572 bridge = image->parent->driver_priv;
/* Map the requested address space to ITAT bits + window granularity. */
579 addr |= TSI148_LCSR_ITAT_AS_A16;
582 granularity = 0x1000;
583 addr |= TSI148_LCSR_ITAT_AS_A24;
586 granularity = 0x10000;
587 addr |= TSI148_LCSR_ITAT_AS_A32;
590 granularity = 0x10000;
591 addr |= TSI148_LCSR_ITAT_AS_A64;
/* NOTE(review): printk lacks a KERN_ log level here and below. */
599 printk("Invalid address space\n");
604 /* Convert 64-bit variables to 2x 32-bit variables */
605 reg_split(vme_base, &vme_base_high, &vme_base_low);
608 * Bound address is a valid address for the window, adjust
611 vme_bound = vme_base + size - granularity;
612 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
613 pci_offset = (unsigned long long)pci_base - vme_base;
614 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
/* All three quantities must be aligned to the window granularity. */
616 if (vme_base_low & (granularity - 1)) {
617 printk("Invalid VME base alignment\n");
620 if (vme_bound_low & (granularity - 1)) {
621 printk("Invalid VME bound alignment\n");
624 if (pci_offset_low & (granularity - 1)) {
625 printk("Invalid PCI Offset alignment\n");
629 /* Disable while we are mucking around */
630 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
631 TSI148_LCSR_OFFSET_ITAT);
632 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
633 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITAT);
/* Program start/end/translation-offset register pairs (upper/lower). */
637 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITSAU);
639 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITSAL);
641 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITEAU);
643 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITEAL);
645 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
646 TSI148_LCSR_OFFSET_ITOFU);
647 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
648 TSI148_LCSR_OFFSET_ITOFL);
650 /* Setup 2eSST speeds */
651 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
652 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
654 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
657 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
660 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
664 /* Setup cycle types */
/* Clear the 5 transfer-mode bits before OR-ing in the requested ones. */
665 temp_ctl &= ~(0x1F << 7);
667 temp_ctl |= TSI148_LCSR_ITAT_BLT;
668 if (cycle & VME_MBLT)
669 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
670 if (cycle & VME_2eVME)
671 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
672 if (cycle & VME_2eSST)
673 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
674 if (cycle & VME_2eSSTB)
675 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
677 /* Setup address space */
678 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
/* Access privilege / program-data response selection. */
682 if (cycle & VME_SUPER)
683 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
684 if (cycle & VME_USER)
685 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
686 if (cycle & VME_PROG)
687 temp_ctl |= TSI148_LCSR_ITAT_PGM;
688 if (cycle & VME_DATA)
689 temp_ctl |= TSI148_LCSR_ITAT_DATA;
691 /* Write ctl reg without enable */
692 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
693 TSI148_LCSR_OFFSET_ITAT);
696 temp_ctl |= TSI148_LCSR_ITAT_EN;
698 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
699 TSI148_LCSR_OFFSET_ITAT);
705 * Get slave window configuration.
/*
 * Read back inbound window "i" from hardware and decode the register
 * contents into the caller's enabled/vme_base/size/pci_base/aspace/cycle
 * out-parameters. Inverse of tsi148_slave_set().
 */
707 int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
708 unsigned long long *vme_base, unsigned long long *size,
709 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
711 unsigned int i, granularity = 0, ctl = 0;
712 unsigned int vme_base_low, vme_base_high;
713 unsigned int vme_bound_low, vme_bound_high;
714 unsigned int pci_offset_low, pci_offset_high;
715 unsigned long long vme_bound, pci_offset;
716 struct tsi148_driver *bridge;
718 bridge = image->parent->driver_priv;
/* Read attribute and start/end/offset register pairs for window i. */
723 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
724 TSI148_LCSR_OFFSET_ITAT);
726 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITSAU);
728 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITSAL);
730 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITEAU);
732 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
733 TSI148_LCSR_OFFSET_ITEAL);
734 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
735 TSI148_LCSR_OFFSET_ITOFU);
736 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
737 TSI148_LCSR_OFFSET_ITOFL);
739 /* Convert 64-bit variables to 2x 32-bit variables */
740 reg_join(vme_base_high, vme_base_low, vme_base)
741 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
742 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
/* PCI base = VME base plus the programmed translation offset. */
744 *pci_base = (dma_addr_t)vme_base + pci_offset;
750 if (ctl & TSI148_LCSR_ITAT_EN)
/* Decode address space; each space implies the window granularity. */
753 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
757 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
758 granularity = 0x1000;
761 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
762 granularity = 0x10000;
765 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
766 granularity = 0x10000;
770 /* Need granularity before we set the size */
771 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
/* Decode 2eSST speed, cycle types and access privileges from ITAT. */
774 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
775 *cycle |= VME_2eSST160;
776 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
777 *cycle |= VME_2eSST267;
778 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
779 *cycle |= VME_2eSST320;
781 if (ctl & TSI148_LCSR_ITAT_BLT)
783 if (ctl & TSI148_LCSR_ITAT_MBLT)
785 if (ctl & TSI148_LCSR_ITAT_2eVME)
787 if (ctl & TSI148_LCSR_ITAT_2eSST)
789 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
790 *cycle |= VME_2eSSTB;
792 if (ctl & TSI148_LCSR_ITAT_SUPR)
794 if (ctl & TSI148_LCSR_ITAT_NPRIV)
796 if (ctl & TSI148_LCSR_ITAT_PGM)
798 if (ctl & TSI148_LCSR_ITAT_DATA)
805 * Allocate and map PCI Resource
/*
 * (Re)allocate and ioremap a PCI memory window of "size" bytes for a
 * master image. If a differently-sized resource already exists it is
 * unmapped and released first; a size of zero just frees any existing
 * resource. Returns 0 on success or a negative errno.
 */
807 static int tsi148_alloc_resource(struct vme_master_resource *image,
808 unsigned long long size)
810 unsigned long long existing_size;
812 struct pci_dev *pdev;
813 struct vme_bridge *tsi148_bridge;
815 tsi148_bridge = image->parent;
817 /* Find pci_dev container of dev */
818 if (tsi148_bridge->parent == NULL) {
819 printk("Dev entry NULL\n");
822 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
824 existing_size = (unsigned long long)(image->bus_resource.end -
825 image->bus_resource.start);
827 /* If the existing size is OK, return */
/* resource end is inclusive, hence the (size - 1) comparison. */
828 if ((size != 0) && (existing_size == (size - 1)))
831 if (existing_size != 0) {
832 iounmap(image->kern_base);
833 image->kern_base = NULL;
/* NOTE(review): kfree(NULL) is a no-op, so this guard is redundant. */
834 if (image->bus_resource.name != NULL)
835 kfree(image->bus_resource.name)
836 release_resource(&(image->bus_resource));
837 memset(&(image->bus_resource), 0, sizeof(struct resource));
840 /* Exit here if size is zero */
/* Resource name: "<bridge name>.<image number>" (allocated once). */
845 if (image->bus_resource.name == NULL) {
846 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
847 if (image->bus_resource.name == NULL) {
848 printk(KERN_ERR "Unable to allocate memory for resource"
855 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
858 image->bus_resource.start = 0;
859 image->bus_resource.end = (unsigned long)size;
860 image->bus_resource.flags = IORESOURCE_MEM;
862 retval = pci_bus_alloc_resource(pdev->bus,
863 &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
866 printk(KERN_ERR "Failed to allocate mem resource for "
867 "window %d size 0x%lx start 0x%lx\n",
868 image->number, (unsigned long)size,
869 (unsigned long)image->bus_resource.start);
873 image->kern_base = ioremap_nocache(
874 image->bus_resource.start, size);
875 if (image->kern_base == NULL) {
876 printk(KERN_ERR "Failed to remap resource\n");
/* Error unwind: undo mapping, resource allocation and name buffer. */
883 iounmap(image->kern_base);
884 image->kern_base = NULL;
886 release_resource(&(image->bus_resource));
888 kfree(image->bus_resource.name);
889 memset(&(image->bus_resource), 0, sizeof(struct resource));
895 * Free and unmap PCI Resource
/* Tear down everything tsi148_alloc_resource() set up for this image. */
897 static void tsi148_free_resource(struct vme_master_resource *image)
899 iounmap(image->kern_base);
900 image->kern_base = NULL;
901 release_resource(&(image->bus_resource));
902 kfree(image->bus_resource.name);
903 memset(&(image->bus_resource), 0, sizeof(struct resource));
907 * Set the attributes of an outbound window.
/*
 * Program outbound translation window "i": allocate/map a PCI window of
 * "size" bytes (64 KiB granularity), then configure the OT registers so
 * PCI accesses through it become VME cycles at vme_base with the given
 * address space, cycle types and data width. The image's spinlock is
 * held across the whole reprogramming sequence.
 */
909 int tsi148_master_set( struct vme_master_resource *image, int enabled,
910 unsigned long long vme_base, unsigned long long size,
911 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
915 unsigned int temp_ctl = 0;
916 unsigned int pci_base_low, pci_base_high;
917 unsigned int pci_bound_low, pci_bound_high;
918 unsigned int vme_offset_low, vme_offset_high;
919 unsigned long long pci_bound, vme_offset, pci_base;
920 struct tsi148_driver *bridge;
922 bridge = image->parent->driver_priv;
924 /* Verify input data */
/* Outbound windows have 64 KiB (0x10000) alignment requirements. */
925 if (vme_base & 0xFFFF) {
926 printk(KERN_ERR "Invalid VME Window alignment\n");
931 if ((size == 0) && (enabled != 0)) {
932 printk(KERN_ERR "Size must be non-zero for enabled windows\n");
937 spin_lock(&(image->lock));
939 /* Let's allocate the resource here rather than further up the stack as
940 * it avoids pushing loads of bus dependant stuff up the stack. If size
941 * is zero, any existing resource will be freed.
943 retval = tsi148_alloc_resource(image, size);
945 spin_unlock(&(image->lock));
946 printk(KERN_ERR "Unable to allocate memory for "
/* PCI side of the window comes from the just-allocated bus resource. */
956 pci_base = (unsigned long long)image->bus_resource.start;
959 * Bound address is a valid address for the window, adjust
960 * according to window granularity.
962 pci_bound = pci_base + (size - 0x10000);
963 vme_offset = vme_base - pci_base;
966 /* Convert 64-bit variables to 2x 32-bit variables */
967 reg_split(pci_base, &pci_base_high, &pci_base_low);
968 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
969 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
971 if (pci_base_low & 0xFFFF) {
972 spin_unlock(&(image->lock));
973 printk(KERN_ERR "Invalid PCI base alignment\n");
977 if (pci_bound_low & 0xFFFF) {
978 spin_unlock(&(image->lock));
979 printk(KERN_ERR "Invalid PCI bound alignment\n");
983 if (vme_offset_low & 0xFFFF) {
984 spin_unlock(&(image->lock));
985 printk(KERN_ERR "Invalid VME Offset alignment\n");
992 /* Disable while we are mucking around */
993 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
994 TSI148_LCSR_OFFSET_OTAT);
995 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
996 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
997 TSI148_LCSR_OFFSET_OTAT);
999 /* Setup 2eSST speeds */
1000 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1001 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1003 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1006 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1009 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1013 /* Setup cycle types */
/* The TM field is a single mode, so later flags override earlier ones. */
1014 if (cycle & VME_BLT) {
1015 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1016 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1018 if (cycle & VME_MBLT) {
1019 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1020 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1022 if (cycle & VME_2eVME) {
1023 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1024 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1026 if (cycle & VME_2eSST) {
1027 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1028 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1030 if (cycle & VME_2eSSTB) {
1031 printk(KERN_WARNING "Currently not setting Broadcast Select "
1033 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1034 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1037 /* Setup data width */
1038 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1041 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1044 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1047 spin_unlock(&(image->lock));
1048 printk(KERN_ERR "Invalid data width\n");
1053 /* Setup address space */
1054 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1057 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1060 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1063 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1066 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1069 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1072 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1075 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1078 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1081 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1084 spin_unlock(&(image->lock));
1085 printk(KERN_ERR "Invalid address space\n");
/* Clear supervisor/program bits before applying requested privileges. */
1091 temp_ctl &= ~(3<<4);
1092 if (cycle & VME_SUPER)
1093 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1094 if (cycle & VME_PROG)
1095 temp_ctl |= TSI148_LCSR_OTAT_PGM;
/* Program start/end/translation-offset register pairs (upper/lower). */
1098 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1099 TSI148_LCSR_OFFSET_OTSAU);
1100 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1101 TSI148_LCSR_OFFSET_OTSAL);
1102 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1103 TSI148_LCSR_OFFSET_OTEAU);
1104 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1105 TSI148_LCSR_OFFSET_OTEAL);
1106 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1107 TSI148_LCSR_OFFSET_OTOFU);
1108 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1109 TSI148_LCSR_OFFSET_OTOFL);
1111 /* Write ctl reg without enable */
1112 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1113 TSI148_LCSR_OFFSET_OTAT);
1116 temp_ctl |= TSI148_LCSR_OTAT_EN;
1118 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1119 TSI148_LCSR_OFFSET_OTAT);
1121 spin_unlock(&(image->lock));
/* Error path: free the PCI resource allocated earlier in this call. */
1127 tsi148_free_resource(image);
1135 * Set the attributes of an outbound window.
1137 * XXX Not parsing prefetch information.
/*
 * Lockless read-back of outbound window "i": decode the OT registers
 * into the caller's out-parameters. Callers must hold image->lock (see
 * tsi148_master_get() wrapper).
 */
1139 int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1140 unsigned long long *vme_base, unsigned long long *size,
1141 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1143 unsigned int i, ctl;
1144 unsigned int pci_base_low, pci_base_high;
1145 unsigned int pci_bound_low, pci_bound_high;
1146 unsigned int vme_offset_low, vme_offset_high;
1148 unsigned long long pci_base, pci_bound, vme_offset;
1149 struct tsi148_driver *bridge;
1151 bridge = image->parent->driver_priv;
1155 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTAT);
1158 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1159 TSI148_LCSR_OFFSET_OTSAU);
1160 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1161 TSI148_LCSR_OFFSET_OTSAL);
1162 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1163 TSI148_LCSR_OFFSET_OTEAU);
1164 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1165 TSI148_LCSR_OFFSET_OTEAL);
1166 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1167 TSI148_LCSR_OFFSET_OTOFU);
1168 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1169 TSI148_LCSR_OFFSET_OTOFL);
1171 /* Convert 64-bit variables to 2x 32-bit variables */
1172 reg_join(pci_base_high, pci_base_low, &pci_base);
1173 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1174 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
/* Window granularity is 64 KiB, hence the + 0x10000 for inclusive end. */
1176 *vme_base = pci_base + vme_offset;
1177 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1184 if (ctl & TSI148_LCSR_OTAT_EN)
1187 /* Setup address space */
1188 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1190 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1192 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1194 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1196 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1197 *aspace |= VME_CRCSR;
1198 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1199 *aspace |= VME_USER1;
1200 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1201 *aspace |= VME_USER2;
1202 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1203 *aspace |= VME_USER3;
1204 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1205 *aspace |= VME_USER4;
1207 /* Setup 2eSST speeds */
1208 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1209 *cycle |= VME_2eSST160;
1210 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1211 *cycle |= VME_2eSST267;
1212 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1213 *cycle |= VME_2eSST320;
1215 /* Setup cycle types */
1216 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
1218 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
1220 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
1222 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
1223 *cycle |= VME_2eVME;
1224 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
1225 *cycle |= VME_2eSST;
1226 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
1227 *cycle |= VME_2eSSTB;
1229 if (ctl & TSI148_LCSR_OTAT_SUP)
1230 *cycle |= VME_SUPER;
1234 if (ctl & TSI148_LCSR_OTAT_PGM)
1239 /* Setup data width */
1240 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1242 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
/*
 * Locked wrapper around __tsi148_master_get(): takes the image spinlock
 * for the duration of the register read-back.
 */
1249 int tsi148_master_get( struct vme_master_resource *image, int *enabled,
1250 unsigned long long *vme_base, unsigned long long *size,
1251 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1255 spin_lock(&(image->lock));
1257 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1260 spin_unlock(&(image->lock));
/*
 * Read from a master window into a kernel buffer.
 *
 * Copies from the ioremapped window (memcpy_fromio), then consults the saved
 * VME bus-error list for errors in the [vme_base + offset, +count) range of
 * the window's address space.  On error the return value is the number of
 * bytes before the faulting address (vme_err->address - (vme_base + offset)),
 * and the matching saved errors are cleared.
 */
1265 ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1266 size_t count, loff_t offset)
1268 int retval, enabled;
1269 unsigned long long vme_base, size;
1270 vme_address_t aspace;
1273 struct vme_bus_error *vme_err = NULL;
1274 struct vme_bridge *tsi148_bridge;
1276 tsi148_bridge = image->parent;
1278 spin_lock(&(image->lock));
1280 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
/* Window attributes are needed to know which address space to scan for
 * saved bus errors. */
1286 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1289 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1291 if(vme_err != NULL) {
/* NOTE(review): message wording "an at address" is transposed —
 * should read "at address"; fixing it changes a runtime string. */
1292 dev_err(image->parent->parent, "First VME read error detected "
1293 "an at address 0x%llx\n", vme_err->address);
1294 retval = vme_err->address - (vme_base + offset);
1295 /* Clear down saved errors in this address range */
1296 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1301 spin_unlock(&(image->lock));
/*
 * Write a kernel buffer out through a master window.
 *
 * Writes are posted by the bridge, so after memcpy_toio() a read is issued
 * through the dedicated CR/CSR flush window (bridge->flush_image) to force
 * the posted writes onto the VME bus before the saved-error list is checked.
 * On error, returns the byte count preceding the faulting address.
 */
1307 ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1308 size_t count, loff_t offset)
1310 int retval = 0, enabled;
1311 unsigned long long vme_base, size;
1312 vme_address_t aspace;
1316 struct vme_bus_error *vme_err = NULL;
1317 struct vme_bridge *tsi148_bridge;
1318 struct tsi148_driver *bridge;
1320 tsi148_bridge = image->parent;
1322 bridge = tsi148_bridge->driver_priv;
1324 spin_lock(&(image->lock));
1326 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1330 * Writes are posted. We need to do a read on the VME bus to flush out
1331 * all of the writes before we check for errors. We can't guarantee
1332 * that reading the data we have just written is safe. It is believed
1333 * that there isn't any read, write re-ordering, so we can read any
1334 * location in VME space, so lets read the Device ID from the tsi148's
1335 * own registers as mapped into CR/CSR space.
1337 * We check for saved errors in the written address range/space.
1344 * Get window info first, to maximise the time that the buffers may
1345 * flush on their own
1347 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
/* Flushing read: 0x7F000 is within the board's own CR/CSR register
 * region mapped by the flush window. */
1350 ioread16(bridge->flush_image->kern_base + 0x7F000);
1352 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1354 if(vme_err != NULL) {
/* NOTE(review): printk lacks a KERN_ log level, and the message has
 * the same "an at address" transposition as the read path. */
1355 printk("First VME write error detected an at address 0x%llx\n",
1357 retval = vme_err->address - (vme_base + offset);
1358 /* Clear down saved errors in this address range */
1359 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1364 spin_unlock(&(image->lock));
1370 * Perform an RMW cycle on the VME bus.
1372 * Requires a previously configured master window, returns final value.
 *
 * Looks up the PCI address backing the window (OTSAU/OTSAL registers),
 * programs the RMW enable/compare/swap/address registers, enables RMW mode
 * in VMCTRL, then triggers the cycle with a single read through the window.
 * bridge->vme_rmw serializes RMW users (only one hardware RMW engine);
 * image->lock protects the window itself.
1374 unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1375 unsigned int mask, unsigned int compare, unsigned int swap,
1378 unsigned long long pci_addr;
1379 unsigned int pci_addr_high, pci_addr_low;
1382 struct tsi148_driver *bridge;
1384 bridge = image->parent->driver_priv;
1386 /* Find the PCI address that maps to the desired VME address */
1389 /* Locking as we can only do one of these at a time */
1390 mutex_lock(&(bridge->vme_rmw));
1393 spin_lock(&(image->lock));
/* Read the outbound translation start address (upper/lower halves)
 * for this window from the LCSR register group. */
1395 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1396 TSI148_LCSR_OFFSET_OTSAU);
1397 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1398 TSI148_LCSR_OFFSET_OTSAL);
1400 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1401 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1403 /* Configure registers */
1404 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1405 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1406 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1407 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1408 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
/* Enable RMW mode in the VME master control register. */
1411 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1412 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1413 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1415 /* Kick process off with a read to the required address. */
1416 result = ioread32be(image->kern_base + offset);
/* Disable RMW mode again so normal reads behave normally. */
1419 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1420 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1421 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1423 spin_unlock(&(image->lock));
1425 mutex_unlock(&(bridge->vme_rmw));
/*
 * Translate generic VME source attributes (address space, cycle type, data
 * width) into TSI148 DMA source-attribute (DSAT) register bits, OR-ing them
 * into *attr.  Invalid dwidth/aspace values are reported and rejected.
 *
 * NOTE(review): the switch case labels are not visible in this extract;
 * each *attr |= line presumably sits under the matching VME_* case.
 */
1430 static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1431 vme_cycle_t cycle, vme_width_t dwidth)
1433 /* Setup 2eSST speeds */
1434 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1436 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1439 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1442 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1446 /* Setup cycle types */
1447 if (cycle & VME_SCT) {
1448 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1450 if (cycle & VME_BLT) {
1451 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1453 if (cycle & VME_MBLT) {
1454 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1456 if (cycle & VME_2eVME) {
1457 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1459 if (cycle & VME_2eSST) {
1460 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1462 if (cycle & VME_2eSSTB) {
/* NOTE(review): printk lacks a KERN_ log level. */
1463 printk("Currently not setting Broadcast Select Registers\n");
1464 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1467 /* Setup data width */
1470 *attr |= TSI148_LCSR_DSAT_DBW_16;
1473 *attr |= TSI148_LCSR_DSAT_DBW_32;
1476 printk("Invalid data width\n");
1480 /* Setup address space */
1483 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1486 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1489 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1492 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1495 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1498 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1501 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1504 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1507 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1510 printk("Invalid address space\n");
/* Supervisor / program cycle modifiers are simple OR-in flags. */
1515 if (cycle & VME_SUPER)
1516 *attr |= TSI148_LCSR_DSAT_SUP;
1517 if (cycle & VME_PROG)
1518 *attr |= TSI148_LCSR_DSAT_PGM;
/*
 * Translate generic VME destination attributes into TSI148 DMA
 * destination-attribute (DDAT) register bits, OR-ing them into *attr.
 * Mirrors tsi148_dma_set_vme_src_attributes() but targets the DDAT
 * bit definitions.
 *
 * NOTE(review): switch case labels are not visible in this extract.
 */
1523 static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1524 vme_cycle_t cycle, vme_width_t dwidth)
1526 /* Setup 2eSST speeds */
1527 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1529 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1532 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1535 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1539 /* Setup cycle types */
1540 if (cycle & VME_SCT) {
1541 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1543 if (cycle & VME_BLT) {
1544 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1546 if (cycle & VME_MBLT) {
1547 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1549 if (cycle & VME_2eVME) {
1550 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1552 if (cycle & VME_2eSST) {
1553 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1555 if (cycle & VME_2eSSTB) {
/* NOTE(review): printk lacks a KERN_ log level. */
1556 printk("Currently not setting Broadcast Select Registers\n");
1557 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1560 /* Setup data width */
1563 *attr |= TSI148_LCSR_DDAT_DBW_16;
1566 *attr |= TSI148_LCSR_DDAT_DBW_32;
1569 printk("Invalid data width\n");
1573 /* Setup address space */
1576 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1579 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1582 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1585 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1588 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1591 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1594 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1597 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1600 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1603 printk("Invalid address space\n");
/* Supervisor / program cycle modifiers. */
1608 if (cycle & VME_SUPER)
1609 *attr |= TSI148_LCSR_DDAT_SUP;
1610 if (cycle & VME_PROG)
1611 *attr |= TSI148_LCSR_DDAT_PGM;
1617 * Add a link list descriptor to the list
 *
 * Allocates a tsi148_dma_entry, fills its hardware descriptor from the
 * source attributes (pattern / PCI / VME), destination attributes
 * (PCI / VME) and byte count, appends it to list->entries, and patches
 * the previous descriptor's "next link address" (dnlau/dnlal) to point
 * at this one so the hardware can chain them.
1619 int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1620 struct vme_dma_attr *dest, size_t count)
1622 struct tsi148_dma_entry *entry, *prev;
1623 u32 address_high, address_low;
1624 struct vme_dma_pattern *pattern_attr;
1625 struct vme_dma_pci *pci_attr;
1626 struct vme_dma_vme *vme_attr;
1627 dma_addr_t desc_ptr;
1630 /* Descriptor must be aligned on 64-bit boundaries */
/* NOTE(review): casting the result of kmalloc() is unnecessary in C. */
1631 entry = (struct tsi148_dma_entry *)kmalloc(
1632 sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1633 if (entry == NULL) {
1634 printk("Failed to allocate memory for dma resource "
1640 /* Test descriptor alignment */
1641 if ((unsigned long)&(entry->descriptor) & 0x7) {
1642 printk("Descriptor not aligned to 8 byte boundary as "
1643 "required: %p\n", &(entry->descriptor));
1648 /* Given we are going to fill out the structure, we probably don't
1649 * need to zero it, but better safe than sorry for now.
1651 memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1653 /* Fill out source part */
1654 switch (src->type) {
1655 case VME_DMA_PATTERN:
1656 pattern_attr = (struct vme_dma_pattern *)src->private;
/* Pattern mode: the pattern value goes in the source low address. */
1658 entry->descriptor.dsal = pattern_attr->pattern;
1659 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1660 /* Default behaviour is 32 bit pattern */
1661 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1662 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1664 /* It seems that the default behaviour is to increment */
1665 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1666 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
/* NOTE(review): case label (presumably VME_DMA_PCI) not visible here. */
1670 pci_attr = (struct vme_dma_pci *)src->private;
1672 reg_split((unsigned long long)pci_attr->address, &address_high,
1674 entry->descriptor.dsau = address_high;
1675 entry->descriptor.dsal = address_low;
1676 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
/* NOTE(review): case label (presumably VME_DMA_VME) not visible here. */
1679 vme_attr = (struct vme_dma_vme *)src->private;
1681 reg_split((unsigned long long)vme_attr->address, &address_high,
1683 entry->descriptor.dsau = address_high;
1684 entry->descriptor.dsal = address_low;
1685 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1687 retval = tsi148_dma_set_vme_src_attributes(
1688 &(entry->descriptor.dsat), vme_attr->aspace,
1689 vme_attr->cycle, vme_attr->dwidth);
1694 printk("Invalid source type\n");
1700 /* Assume last link - this will be over-written by adding another */
1701 entry->descriptor.dnlau = 0;
1702 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1705 /* Fill out destination part */
1706 switch (dest->type) {
1708 pci_attr = (struct vme_dma_pci *)dest->private;
1710 reg_split((unsigned long long)pci_attr->address, &address_high,
1712 entry->descriptor.ddau = address_high;
1713 entry->descriptor.ddal = address_low;
1714 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1717 vme_attr = (struct vme_dma_vme *)dest->private;
1719 reg_split((unsigned long long)vme_attr->address, &address_high,
1721 entry->descriptor.ddau = address_high;
1722 entry->descriptor.ddal = address_low;
1723 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1725 retval = tsi148_dma_set_vme_dest_attributes(
1726 &(entry->descriptor.ddat), vme_attr->aspace,
1727 vme_attr->cycle, vme_attr->dwidth);
1732 printk("Invalid destination type\n");
1738 /* Fill out count */
1739 entry->descriptor.dcnt = (u32)count;
1742 list_add_tail(&(entry->list), &(list->entries));
1744 /* Fill out previous descriptors "Next Address" */
1745 if(entry->list.prev != &(list->entries)){
1746 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1748 /* We need the bus address for the pointer */
/* NOTE(review): virt_to_bus() is deprecated; the DMA mapping API
 * (dma_map_single et al.) is the portable replacement. */
1749 desc_ptr = virt_to_bus(&(entry->descriptor));
1750 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1751 &(prev->descriptor.dnlal));
1765 * Check to see if the provided DMA channel is busy.
 *
 * Reads the channel's DMA status register (DSTA) and tests the busy bit.
 * Used as the wake-up condition for the DMA completion wait queue.
1767 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1770 struct tsi148_driver *bridge;
1772 bridge = tsi148_bridge->driver_priv;
1774 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1775 TSI148_LCSR_OFFSET_DSTA);
/* NOTE(review): the return statements are not visible in this extract;
 * given its use in wait_event_interruptible() the function presumably
 * returns nonzero when the channel is idle. */
1777 if (tmp & TSI148_LCSR_DSTA_BSY)
1785 * Execute a previously generated link list
1787 * XXX Need to provide control register configuration.
 *
 * Rejects the request if a transfer is already running on the channel
 * (no pending-queue support yet), otherwise writes the bus address of the
 * first descriptor into DNLAU/DNLAL, sets DGO in DCTL to start the engine,
 * then sleeps on the channel's wait queue until tsi148_dma_busy() reports
 * the channel idle.  ctrlr->mtx guards the running list and registers.
1789 int tsi148_dma_list_exec(struct vme_dma_list *list)
1791 struct vme_dma_resource *ctrlr;
1792 int channel, retval = 0;
1793 struct tsi148_dma_entry *entry;
1794 dma_addr_t bus_addr;
1795 u32 bus_addr_high, bus_addr_low;
1796 u32 val, dctlreg = 0;
1797 struct tsi148_driver *bridge;
1799 ctrlr = list->parent;
1801 bridge = ctrlr->parent->driver_priv;
1803 mutex_lock(&(ctrlr->mtx));
1805 channel = ctrlr->number;
1807 if (! list_empty(&(ctrlr->running))) {
1809 * XXX We have an active DMA transfer and currently haven't
1810 * sorted out the mechanism for "pending" DMA transfers.
1813 /* Need to add to pending here */
1814 mutex_unlock(&(ctrlr->mtx));
1817 list_add(&(list->list), &(ctrlr->running));
1820 /* Get first bus address and write into registers */
1821 entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
/* NOTE(review): virt_to_bus() is deprecated in favour of the DMA
 * mapping API. */
1824 bus_addr = virt_to_bus(&(entry->descriptor));
1826 mutex_unlock(&(ctrlr->mtx));
1828 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1830 iowrite32be(bus_addr_high, bridge->base +
1831 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1832 iowrite32be(bus_addr_low, bridge->base +
1833 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1835 /* Start the operation */
1836 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1837 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
/* Sleep until the interrupt handler wakes us and the channel is idle.
 * NOTE(review): the return value of wait_event_interruptible() (signal
 * delivery) is not checked here. */
1839 wait_event_interruptible(bridge->dma_queue[channel],
1840 tsi148_dma_busy(ctrlr->parent, channel));
1842 * Read status register, this register is valid until we kick off a
1845 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1846 TSI148_LCSR_OFFSET_DSTA);
1848 if (val & TSI148_LCSR_DSTA_VBE) {
1849 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1853 /* Remove list from running list */
1854 mutex_lock(&(ctrlr->mtx));
1855 list_del(&(list->list));
1856 mutex_unlock(&(ctrlr->mtx));
1862 * Clean up a previously generated link list
1864 * We have a separate function, don't assume that the chain can't be reused.
 *
 * Walks list->entries with the _safe iterator (entries are removed/freed
 * during traversal) and releases each tsi148_dma_entry.
1866 int tsi148_dma_list_empty(struct vme_dma_list *list)
1868 struct list_head *pos, *temp;
1869 struct tsi148_dma_entry *entry;
1871 /* detach and free each entry */
1872 list_for_each_safe(pos, temp, &(list->entries)) {
/* NOTE(review): the list_del()/kfree() lines are not visible in
 * this extract. */
1874 entry = list_entry(pos, struct tsi148_dma_entry, list);
1882 * All 4 location monitors reside at the same base - this is therefore a
1883 * system wide configuration.
1885 * This does not enable the LM monitor - that should be done when the first
1886 * callback is attached and disabled when the last callback is removed.
 *
 * Programs the location monitor base (LMBAU/LMBAL) and attribute (LMAT)
 * registers from the requested VME base address, address space and cycle
 * modifiers.  Refuses to reconfigure while any callback is attached.
1888 int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1889 vme_address_t aspace, vme_cycle_t cycle)
1891 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1893 struct tsi148_driver *bridge;
1895 bridge = lm->parent->driver_priv;
1897 mutex_lock(&(lm->mtx));
1899 /* If we already have a callback attached, we can't move it! */
1900 for (i = 0; i < lm->monitors; i++) {
1901 if (bridge->lm_callback[i] != NULL) {
1902 mutex_unlock(&(lm->mtx));
1903 printk("Location monitor callback attached, can't "
/* NOTE(review): the switch(aspace) statement and case labels are not
 * visible in this extract; these lines select the LMAT address-space
 * field. */
1911 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1914 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1917 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1920 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1923 mutex_unlock(&(lm->mtx));
1924 printk("Invalid address space\n");
/* Cycle modifiers map directly onto LMAT flag bits. */
1929 if (cycle & VME_SUPER)
1930 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
1931 if (cycle & VME_USER)
1932 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1933 if (cycle & VME_PROG)
1934 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1935 if (cycle & VME_DATA)
1936 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1938 reg_split(lm_base, &lm_base_high, &lm_base_low);
1940 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1941 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1942 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1944 mutex_unlock(&(lm->mtx));
1949 /* Get configuration of the callback monitor and return whether it is enabled
 *
 * Reads LMBAU/LMBAL/LMAT back from the hardware and decodes them into the
 * generic lm_base / aspace / cycle representation; the enabled flag comes
 * from the LMAT_EN bit.
1952 int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
1953 vme_address_t *aspace, vme_cycle_t *cycle)
1955 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1956 struct tsi148_driver *bridge;
1958 bridge = lm->parent->driver_priv;
1960 mutex_lock(&(lm->mtx));
1962 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1963 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1964 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1966 reg_join(lm_base_high, lm_base_low, lm_base);
1968 if (lm_ctl & TSI148_LCSR_LMAT_EN)
/* Decode the address-space field back into a VME_* aspace value. */
1971 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
1974 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
1977 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
1980 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
/* Decode cycle modifier bits. */
1984 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1985 *cycle |= VME_SUPER;
1986 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1988 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
1990 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
1993 mutex_unlock(&(lm->mtx));
1999 * Attach a callback to a specific location monitor.
2001 * Callback will be passed the monitor triggered.
 *
 * Requires the monitor to have been configured (LMAT has PGM or DATA set),
 * refuses to overwrite an existing callback, enables the per-monitor
 * interrupt (INTEN/INTEO) and sets the global LMAT enable bit if it was
 * clear.
2003 int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2004 void (*callback)(int))
2007 struct tsi148_driver *bridge;
2009 bridge = lm->parent->driver_priv;
2011 mutex_lock(&(lm->mtx));
2013 /* Ensure that the location monitor is configured - need PGM or DATA */
2014 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2015 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2016 mutex_unlock(&(lm->mtx));
/* NOTE(review): printk lacks a KERN_ log level. */
2017 printk("Location monitor not properly configured\n");
2021 /* Check that a callback isn't already attached */
2022 if (bridge->lm_callback[monitor] != NULL) {
2023 mutex_unlock(&(lm->mtx));
2024 printk("Existing callback attached\n");
2028 /* Attach callback */
2029 bridge->lm_callback[monitor] = callback;
2031 /* Enable Location Monitor interrupt */
2032 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2033 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2034 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2036 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2037 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2038 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2040 /* Ensure that global Location Monitor Enable set */
2041 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2042 lm_ctl |= TSI148_LCSR_LMAT_EN;
2043 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2046 mutex_unlock(&(lm->mtx));
2052 * Detach a callback function from a specific location monitor.
 *
 * Disables the per-monitor interrupt enable/out bits, clears any latched
 * interrupt for the monitor (INTC), removes the callback pointer, and if
 * no monitor remains enabled, clears the global LMAT enable bit.
2054 int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2057 struct tsi148_driver *bridge;
2059 bridge = lm->parent->driver_priv;
2061 mutex_lock(&(lm->mtx));
2063 /* Disable Location Monitor and ensure previous interrupts are clear */
2064 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2065 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2066 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2068 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2069 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2070 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
/* Clear any pending latched interrupt for this monitor. */
2072 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2073 bridge->base + TSI148_LCSR_INTC);
2075 /* Detach callback */
2076 bridge->lm_callback[monitor] = NULL;
2078 /* If all location monitors disabled, disable global Location Monitor */
/* NOTE(review): lm_en holds INTEN bits but is tested against
 * TSI148_LCSR_INTS_* status masks — verify these masks share bit
 * positions with the enable bits before relying on this. */
2079 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2080 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2081 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2082 tmp &= ~TSI148_LCSR_LMAT_EN;
2083 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2086 mutex_unlock(&(lm->mtx));
2092 * Determine Geographical Addressing
 *
 * Reads the VME status register (VSTAT) and masks out the geographical
 * address field, i.e. the slot number the board occupies.
2094 int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2097 struct tsi148_driver *bridge;
2099 bridge = tsi148_bridge->driver_priv;
2102 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2103 slot = slot & TSI148_LCSR_VSTAT_GA_M;
/* Module entry point: register the PCI driver for the TSI148 device. */
2110 static int __init tsi148_init(void)
2112 return pci_register_driver(&tsi148_driver);
2116 * Configure CR/CSR space
2118 * Access to the CR/CSR can be configured at power-up. The location of the
2119 * CR/CSR registers in the CR/CSR address space is determined by the boards
2120 * Auto-ID or Geographic address. This function ensures that the window is
2121 * enabled at an offset consistent with the boards geographic address.
2123 * Each board has a 512kB window, with the highest 4kB being used for the
2124 * boards registers, this means there is a fix length 508kB window which must
2125 * be mapped onto PCI memory.
2127 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2128 struct pci_dev *pdev)
2130 u32 cbar, crat, vstat;
2131 u32 crcsr_bus_high, crcsr_bus_low;
2133 struct tsi148_driver *bridge;
2135 bridge = tsi148_bridge->driver_priv;
2137 /* Allocate mem for CR/CSR image */
2138 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2139 &(bridge->crcsr_bus));
2140 if (bridge->crcsr_kernel == NULL) {
2141 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2146 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2148 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2150 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2151 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2153 /* Ensure that the CR/CSR is configured at the correct offset */
2154 cbar = ioread32be(bridge->base + TSI148_CBAR);
2155 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2157 vstat = tsi148_slot_get(tsi148_bridge);
2159 if (cbar != vstat) {
2161 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
/* NOTE(review): this rewrites the old cbar value rather than the slot
 * (vstat) — writing vstat<<3 looks like the intent; confirm against
 * the TSI148 CBAR register definition. */
2162 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2164 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2166 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
/* NOTE(review): enabling when CRAT_EN is already set looks inverted —
 * presumably the branch should fire when the bit is clear; the missing
 * else-path here prints "already enabled". Verify against datasheet. */
2167 if (crat & TSI148_LCSR_CRAT_EN) {
2168 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2169 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2170 bridge->base + TSI148_LCSR_CRAT);
2172 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2174 /* If we want flushed, error-checked writes, set up a window
2175 * over the CR/CSR registers. We read from here to safely flush
2176 * through VME writes.
/* Each slot owns a 512kB (0x80000) CR/CSR region; map this board's. */
2179 retval = tsi148_master_set(bridge->flush_image, 1,
2180 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2183 dev_err(&pdev->dev, "Configuring flush image failed\n");
/*
 * Tear down CR/CSR space: disable the CR/CSR attribute enable bit, zero
 * the CR/CSR offset registers, and free the coherent buffer allocated in
 * tsi148_crcsr_init().
 */
2190 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2191 struct pci_dev *pdev)
2194 struct tsi148_driver *bridge;
2196 bridge = tsi148_bridge->driver_priv;
2198 /* Turn off CR/CSR space */
2199 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2200 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2201 bridge->base + TSI148_LCSR_CRAT);
/* Clear the CR/CSR-to-PCI offset registers. */
2204 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2205 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2207 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
/*
 * PCI probe: bring up one TSI148 bridge.
 *
 * Allocates the vme_bridge and driver-private structures, enables the PCI
 * device, maps the CRG register BAR, sanity-checks it against the Tundra
 * vendor ID, initializes interrupts, builds the master/slave/DMA/location-
 * monitor resource lists, installs the bridge operation callbacks,
 * configures CR/CSR space and registers the bridge with the VME core.
 * Error paths (labels not visible in this extract) unwind in reverse order.
 */
2211 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2213 int retval, i, master_num;
2215 struct list_head *pos = NULL;
2216 struct vme_bridge *tsi148_bridge;
2217 struct tsi148_driver *tsi148_device;
2218 struct vme_master_resource *master_image;
2219 struct vme_slave_resource *slave_image;
2220 struct vme_dma_resource *dma_ctrlr;
2221 struct vme_lm_resource *lm;
2223 /* If we want to support more than one of each bridge, we need to
2224 * dynamically generate this so we get one per device
/* NOTE(review): kmalloc+memset pairs below could be kzalloc, and the
 * kmalloc casts are unnecessary in C. */
2226 tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2228 if (tsi148_bridge == NULL) {
2229 dev_err(&pdev->dev, "Failed to allocate memory for device "
2235 memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2237 tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2238 if (tsi148_device == NULL) {
2239 dev_err(&pdev->dev, "Failed to allocate memory for device "
2245 memset(tsi148_device, 0, sizeof(struct tsi148_driver));
2247 tsi148_bridge->driver_priv = tsi148_device;
2249 /* Enable the device */
2250 retval = pci_enable_device(pdev);
2252 dev_err(&pdev->dev, "Unable to enable device\n");
2257 retval = pci_request_regions(pdev, driver_name);
2259 dev_err(&pdev->dev, "Unable to reserve resources\n");
2263 /* map registers in BAR 0 */
2264 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2266 if (!tsi148_device->base) {
2267 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2272 /* Check to see if the mapping worked out */
2273 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2274 if (data != PCI_VENDOR_ID_TUNDRA) {
2275 dev_err(&pdev->dev, "CRG region check failed\n");
2280 /* Initialize wait queues & mutual exclusion flags */
2281 init_waitqueue_head(&(tsi148_device->dma_queue[0]));
2282 init_waitqueue_head(&(tsi148_device->dma_queue[1]));
2283 init_waitqueue_head(&(tsi148_device->iack_queue));
2284 mutex_init(&(tsi148_device->vme_int));
2285 mutex_init(&(tsi148_device->vme_rmw));
2287 tsi148_bridge->parent = &(pdev->dev);
2288 strcpy(tsi148_bridge->name, driver_name);
/* Hook up the chip's interrupt handling. */
2291 retval = tsi148_irq_init(tsi148_bridge);
2293 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2297 /* If we are going to flush writes, we need to read from the VME bus.
2298 * We need to do this safely, thus we read the devices own CR/CSR
2299 * register. To do this we must set up a window in CR/CSR space and
2300 * hence have one less master window resource available.
2302 master_num = TSI148_MAX_MASTER;
/* The flush image reserves one hardware master window (number
 * master_num) for the error-checked-write flushing read. */
2306 tsi148_device->flush_image = (struct vme_master_resource *)
2307 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2308 if (tsi148_device->flush_image == NULL) {
2309 dev_err(&pdev->dev, "Failed to allocate memory for "
2310 "flush resource structure\n");
2314 tsi148_device->flush_image->parent = tsi148_bridge;
2315 spin_lock_init(&(tsi148_device->flush_image->lock));
2316 tsi148_device->flush_image->locked = 1;
2317 tsi148_device->flush_image->number = master_num;
2318 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2320 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2321 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2322 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2323 VME_USER | VME_PROG | VME_DATA;
2324 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2325 memset(&(tsi148_device->flush_image->bus_resource), 0,
2326 sizeof(struct resource));
2327 tsi148_device->flush_image->kern_base = NULL;
2330 /* Add master windows to list */
2331 INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2332 for (i = 0; i < master_num; i++) {
2333 master_image = (struct vme_master_resource *)kmalloc(
2334 sizeof(struct vme_master_resource), GFP_KERNEL);
2335 if (master_image == NULL) {
2336 dev_err(&pdev->dev, "Failed to allocate memory for "
2337 "master resource structure\n");
2341 master_image->parent = tsi148_bridge;
2342 spin_lock_init(&(master_image->lock));
2343 master_image->locked = 0;
2344 master_image->number = i;
2345 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2347 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2348 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2349 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2350 VME_PROG | VME_DATA;
2351 master_image->width_attr = VME_D16 | VME_D32;
2352 memset(&(master_image->bus_resource), 0,
2353 sizeof(struct resource));
2354 master_image->kern_base = NULL;
2355 list_add_tail(&(master_image->list),
2356 &(tsi148_bridge->master_resources));
2359 /* Add slave windows to list */
2360 INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2361 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2362 slave_image = (struct vme_slave_resource *)kmalloc(
2363 sizeof(struct vme_slave_resource), GFP_KERNEL);
2364 if (slave_image == NULL) {
2365 dev_err(&pdev->dev, "Failed to allocate memory for "
2366 "slave resource structure\n");
2370 slave_image->parent = tsi148_bridge;
2371 mutex_init(&(slave_image->mtx));
2372 slave_image->locked = 0;
2373 slave_image->number = i;
2374 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2375 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2376 VME_USER3 | VME_USER4;
2377 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2378 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2379 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2380 VME_PROG | VME_DATA;
2381 list_add_tail(&(slave_image->list),
2382 &(tsi148_bridge->slave_resources));
2385 /* Add dma engines to list */
2386 INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2387 for (i = 0; i < TSI148_MAX_DMA; i++) {
2388 dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2389 sizeof(struct vme_dma_resource), GFP_KERNEL);
2390 if (dma_ctrlr == NULL) {
2391 dev_err(&pdev->dev, "Failed to allocate memory for "
2392 "dma resource structure\n");
2396 dma_ctrlr->parent = tsi148_bridge;
2397 mutex_init(&(dma_ctrlr->mtx));
2398 dma_ctrlr->locked = 0;
2399 dma_ctrlr->number = i;
2400 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2401 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2402 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2403 VME_DMA_PATTERN_TO_MEM;
2404 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2405 INIT_LIST_HEAD(&(dma_ctrlr->running));
2406 list_add_tail(&(dma_ctrlr->list),
2407 &(tsi148_bridge->dma_resources));
2410 /* Add location monitor to list */
2411 INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
2412 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2414 dev_err(&pdev->dev, "Failed to allocate memory for "
2415 "location monitor resource structure\n");
2419 lm->parent = tsi148_bridge;
2420 mutex_init(&(lm->mtx));
2424 list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
/* Install the bridge operation callbacks. */
2426 tsi148_bridge->slave_get = tsi148_slave_get;
2427 tsi148_bridge->slave_set = tsi148_slave_set;
2428 tsi148_bridge->master_get = tsi148_master_get;
2429 tsi148_bridge->master_set = tsi148_master_set;
2430 tsi148_bridge->master_read = tsi148_master_read;
2431 tsi148_bridge->master_write = tsi148_master_write;
2432 tsi148_bridge->master_rmw = tsi148_master_rmw;
2433 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2434 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2435 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2436 tsi148_bridge->irq_set = tsi148_irq_set;
2437 tsi148_bridge->irq_generate = tsi148_irq_generate;
2438 tsi148_bridge->lm_set = tsi148_lm_set;
2439 tsi148_bridge->lm_get = tsi148_lm_get;
2440 tsi148_bridge->lm_attach = tsi148_lm_attach;
2441 tsi148_bridge->lm_detach = tsi148_lm_detach;
2442 tsi148_bridge->slot_get = tsi148_slot_get;
2444 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2445 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2446 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
2448 dev_info(&pdev->dev, "VME geographical address is %d\n",
2449 data & TSI148_LCSR_VSTAT_GA_M);
2451 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2454 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2455 err_chk ? "enabled" : "disabled");
2457 if (tsi148_crcsr_init(tsi148_bridge, pdev))
2458 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2461 retval = vme_register_bridge(tsi148_bridge);
2463 dev_err(&pdev->dev, "Chip Registration failed.\n");
2467 pci_set_drvdata(pdev, tsi148_bridge);
2469 /* Clear VME bus "board fail", and "power-up reset" lines */
2470 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2471 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2472 data |= TSI148_LCSR_VSTAT_CPURST;
2473 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
/* Error unwind: free each resource list in reverse order of creation.
 * (Goto labels for these paths are not visible in this extract.) */
2477 vme_unregister_bridge(tsi148_bridge);
2479 tsi148_crcsr_exit(tsi148_bridge, pdev);
2482 /* resources are stored in link list */
2483 list_for_each(pos, &(tsi148_bridge->lm_resources)) {
2484 lm = list_entry(pos, struct vme_lm_resource, list);
2489 /* resources are stored in link list */
2490 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2491 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2496 /* resources are stored in link list */
2497 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2498 slave_image = list_entry(pos, struct vme_slave_resource, list);
2503 /* resources are stored in link list */
2504 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2505 master_image = list_entry(pos, struct vme_master_resource, list);
2507 kfree(master_image);
2510 tsi148_irq_exit(tsi148_device, pdev);
2513 iounmap(tsi148_device->base);
2515 pci_release_regions(pdev);
2517 pci_disable_device(pdev);
2519 kfree(tsi148_device);
2521 kfree(tsi148_bridge);
/*
 * tsi148_remove - PCI driver .remove callback.
 *
 * Quiesces the bridge hardware (windows, location monitor, CR/CSR,
 * error/interrupt state), unregisters from the VME core, frees every
 * resource node, and releases the PCI device.  Several structural lines
 * (the `int i;` declaration, loop bodies, closing braces) are elided in
 * this excerpt.
 */
2527 static void tsi148_remove(struct pci_dev *pdev)
2529 struct list_head *pos = NULL;
2530 struct vme_master_resource *master_image;
2531 struct vme_slave_resource *slave_image;
2532 struct vme_dma_resource *dma_ctrlr;
2534 struct tsi148_driver *bridge;
2535 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
/* driver_priv holds the tsi148-specific state (register base etc.). */
2537 bridge = tsi148_bridge->driver_priv;
2540 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2543 * Shutdown all inbound and outbound windows.
/* Disable the 8 inbound (IT) and 8 outbound (OT) translation windows by
 * clearing each window's attribute register. */
2545 for (i = 0; i < 8; i++) {
2546 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2547 TSI148_LCSR_OFFSET_ITAT);
2548 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2549 TSI148_LCSR_OFFSET_OTAT);
2553 * Shutdown Location monitor.
2555 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
/* Disable the CR/CSR image (CSRAT attribute register cleared). */
2560 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2563 * Clear error status.
/* Write-1-to-clear the PCI error attribute (EDPAT), VME exception
 * attribute (VEAT) and PCI status (PSTAT) latches. */
2565 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2566 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2567 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2570 * Remove VIRQ interrupt (if any)
/* NOTE(review): magic masks 0x800/0x8000 — presumably the VICR IRQ
 * status/clear bits; confirm against the TSI148 register definitions. */
2572 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2573 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2576 * Map all Interrupts to PCI INTA
2578 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2579 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2581 tsi148_irq_exit(bridge, pdev);
2583 vme_unregister_bridge(tsi148_bridge);
2585 tsi148_crcsr_exit(tsi148_bridge, pdev);
2587 /* resources are stored in link list */
2588 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2589 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2594 /* resources are stored in link list */
2595 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2596 slave_image = list_entry(pos, struct vme_slave_resource, list);
2601 /* resources are stored in link list */
2602 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2603 master_image = list_entry(pos, struct vme_master_resource,
2606 kfree(master_image);
/* NOTE(review): nodes are freed while walking with list_for_each();
 * if the elided lines do not list_del() first, list_for_each_safe()
 * is required — verify in the full source. */
/* NOTE(review): tsi148_irq_exit() was already called at line 2581
 * above — this second call looks like duplicated IRQ teardown (double
 * free_irq risk).  Confirm and drop one of them in the full source. */
2609 tsi148_irq_exit(bridge, pdev);
2611 iounmap(bridge->base);
2613 pci_release_regions(pdev);
2615 pci_disable_device(pdev);
2617 kfree(tsi148_bridge->driver_priv);
2619 kfree(tsi148_bridge);
/*
 * tsi148_exit - module exit hook.
 *
 * Unregisters the PCI driver; the PCI core then invokes tsi148_remove()
 * for every bound device.
 */
2622 static void __exit tsi148_exit(void)
2624 pci_unregister_driver(&tsi148_driver);
/* NOTE(review): raw printk without a device/module prefix — pr_debug()
 * or dev_dbg() would match the rest of the driver's logging style. */
2626 printk(KERN_DEBUG "Driver removed.\n");
/* Module parameters and metadata.  err_chk enables the write-and-flush
 * error-checking path reported during probe; geoid overrides the
 * geographical address read from VSTAT.  Both are read-only after load
 * (perm 0: not visible in sysfs). */
2629 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2630 module_param(err_chk, bool, 0);
2632 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2633 module_param(geoid, int, 0);
2635 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2636 MODULE_LICENSE("GPL");
2638 module_init(tsi148_init);
2639 module_exit(tsi148_exit);