4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
34 #include "vme_bridge.h"
36 /* Bitmask and mutex to keep track of bridge numbers */
37 static unsigned int vme_bus_numbers;
38 DEFINE_MUTEX(vme_bus_num_mtx);
40 static void __exit vme_exit(void);
41 static int __init vme_init(void);
45 * Find the bridge resource associated with a specific device resource
47 static struct vme_bridge *dev_to_bridge(struct device *dev)
49 return dev->platform_data;
53 * Find the bridge that the resource is associated with.
55 static struct vme_bridge *find_bridge(struct vme_resource *resource)
57 /* Get list to search */
58 switch (resource->type) {
60 return list_entry(resource->entry, struct vme_master_resource,
64 return list_entry(resource->entry, struct vme_slave_resource,
68 return list_entry(resource->entry, struct vme_dma_resource,
72 return list_entry(resource->entry, struct vme_lm_resource,
76 printk(KERN_ERR "Unknown resource type\n");
83 * Allocate a contiguous block of memory for use by the driver. This is used to
84 * create the buffers for the slave windows.
86 * XXX VME bridges could be available on buses other than PCI. At the momment
87 * this framework only supports PCI devices.
89 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
92 struct vme_bridge *bridge;
95 if (resource == NULL) {
96 printk(KERN_ERR "No resource\n");
100 bridge = find_bridge(resource);
101 if (bridge == NULL) {
102 printk(KERN_ERR "Can't find bridge\n");
106 /* Find pci_dev container of dev */
107 if (bridge->parent == NULL) {
108 printk(KERN_ERR "Dev entry NULL\n");
111 pdev = container_of(bridge->parent, struct pci_dev, dev);
113 return pci_alloc_consistent(pdev, size, dma);
115 EXPORT_SYMBOL(vme_alloc_consistent);
118 * Free previously allocated contiguous block of memory.
120 * XXX VME bridges could be available on buses other than PCI. At the momment
121 * this framework only supports PCI devices.
123 void vme_free_consistent(struct vme_resource *resource, size_t size,
124 void *vaddr, dma_addr_t dma)
126 struct vme_bridge *bridge;
127 struct pci_dev *pdev;
129 if (resource == NULL) {
130 printk(KERN_ERR "No resource\n");
134 bridge = find_bridge(resource);
135 if (bridge == NULL) {
136 printk(KERN_ERR "Can't find bridge\n");
140 /* Find pci_dev container of dev */
141 pdev = container_of(bridge->parent, struct pci_dev, dev);
143 pci_free_consistent(pdev, size, vaddr, dma);
145 EXPORT_SYMBOL(vme_free_consistent);
147 size_t vme_get_size(struct vme_resource *resource)
150 unsigned long long base, size;
152 vme_address_t aspace;
156 switch (resource->type) {
158 retval = vme_master_get(resource, &enabled, &base, &size,
159 &aspace, &cycle, &dwidth);
164 retval = vme_slave_get(resource, &enabled, &base, &size,
165 &buf_base, &aspace, &cycle);
173 printk(KERN_ERR "Unknown resource type\n");
178 EXPORT_SYMBOL(vme_get_size);
180 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
181 unsigned long long size)
187 if (((vme_base + size) > VME_A16_MAX) ||
188 (vme_base > VME_A16_MAX))
192 if (((vme_base + size) > VME_A24_MAX) ||
193 (vme_base > VME_A24_MAX))
197 if (((vme_base + size) > VME_A32_MAX) ||
198 (vme_base > VME_A32_MAX))
203 * Any value held in an unsigned long long can be used as the
208 if (((vme_base + size) > VME_CRCSR_MAX) ||
209 (vme_base > VME_CRCSR_MAX))
219 printk(KERN_ERR "Invalid address space\n");
228 * Request a slave image with specific attributes, return some unique
231 struct vme_resource *vme_slave_request(struct device *dev,
232 vme_address_t address, vme_cycle_t cycle)
234 struct vme_bridge *bridge;
235 struct list_head *slave_pos = NULL;
236 struct vme_slave_resource *allocated_image = NULL;
237 struct vme_slave_resource *slave_image = NULL;
238 struct vme_resource *resource = NULL;
240 bridge = dev_to_bridge(dev);
241 if (bridge == NULL) {
242 printk(KERN_ERR "Can't find VME bus\n");
246 /* Loop through slave resources */
247 list_for_each(slave_pos, &(bridge->slave_resources)) {
248 slave_image = list_entry(slave_pos,
249 struct vme_slave_resource, list);
251 if (slave_image == NULL) {
252 printk(KERN_ERR "Registered NULL Slave resource\n");
256 /* Find an unlocked and compatible image */
257 mutex_lock(&(slave_image->mtx));
258 if (((slave_image->address_attr & address) == address) &&
259 ((slave_image->cycle_attr & cycle) == cycle) &&
260 (slave_image->locked == 0)) {
262 slave_image->locked = 1;
263 mutex_unlock(&(slave_image->mtx));
264 allocated_image = slave_image;
267 mutex_unlock(&(slave_image->mtx));
271 if (allocated_image == NULL)
274 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
275 if (resource == NULL) {
276 printk(KERN_WARNING "Unable to allocate resource structure\n");
279 resource->type = VME_SLAVE;
280 resource->entry = &(allocated_image->list);
286 mutex_lock(&(slave_image->mtx));
287 slave_image->locked = 0;
288 mutex_unlock(&(slave_image->mtx));
293 EXPORT_SYMBOL(vme_slave_request);
295 int vme_slave_set(struct vme_resource *resource, int enabled,
296 unsigned long long vme_base, unsigned long long size,
297 dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
299 struct vme_bridge *bridge = find_bridge(resource);
300 struct vme_slave_resource *image;
303 if (resource->type != VME_SLAVE) {
304 printk(KERN_ERR "Not a slave resource\n");
308 image = list_entry(resource->entry, struct vme_slave_resource, list);
310 if (bridge->slave_set == NULL) {
311 printk(KERN_ERR "Function not supported\n");
315 if (!(((image->address_attr & aspace) == aspace) &&
316 ((image->cycle_attr & cycle) == cycle))) {
317 printk(KERN_ERR "Invalid attributes\n");
321 retval = vme_check_window(aspace, vme_base, size);
325 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
328 EXPORT_SYMBOL(vme_slave_set);
330 int vme_slave_get(struct vme_resource *resource, int *enabled,
331 unsigned long long *vme_base, unsigned long long *size,
332 dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
334 struct vme_bridge *bridge = find_bridge(resource);
335 struct vme_slave_resource *image;
337 if (resource->type != VME_SLAVE) {
338 printk(KERN_ERR "Not a slave resource\n");
342 image = list_entry(resource->entry, struct vme_slave_resource, list);
344 if (bridge->slave_get == NULL) {
345 printk(KERN_ERR "vme_slave_get not supported\n");
349 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
352 EXPORT_SYMBOL(vme_slave_get);
354 void vme_slave_free(struct vme_resource *resource)
356 struct vme_slave_resource *slave_image;
358 if (resource->type != VME_SLAVE) {
359 printk(KERN_ERR "Not a slave resource\n");
363 slave_image = list_entry(resource->entry, struct vme_slave_resource,
365 if (slave_image == NULL) {
366 printk(KERN_ERR "Can't find slave resource\n");
371 mutex_lock(&(slave_image->mtx));
372 if (slave_image->locked == 0)
373 printk(KERN_ERR "Image is already free\n");
375 slave_image->locked = 0;
376 mutex_unlock(&(slave_image->mtx));
378 /* Free up resource memory */
381 EXPORT_SYMBOL(vme_slave_free);
384 * Request a master image with specific attributes, return some unique
387 struct vme_resource *vme_master_request(struct device *dev,
388 vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
390 struct vme_bridge *bridge;
391 struct list_head *master_pos = NULL;
392 struct vme_master_resource *allocated_image = NULL;
393 struct vme_master_resource *master_image = NULL;
394 struct vme_resource *resource = NULL;
396 bridge = dev_to_bridge(dev);
397 if (bridge == NULL) {
398 printk(KERN_ERR "Can't find VME bus\n");
402 /* Loop through master resources */
403 list_for_each(master_pos, &(bridge->master_resources)) {
404 master_image = list_entry(master_pos,
405 struct vme_master_resource, list);
407 if (master_image == NULL) {
408 printk(KERN_WARNING "Registered NULL master resource\n");
412 /* Find an unlocked and compatible image */
413 spin_lock(&(master_image->lock));
414 if (((master_image->address_attr & address) == address) &&
415 ((master_image->cycle_attr & cycle) == cycle) &&
416 ((master_image->width_attr & dwidth) == dwidth) &&
417 (master_image->locked == 0)) {
419 master_image->locked = 1;
420 spin_unlock(&(master_image->lock));
421 allocated_image = master_image;
424 spin_unlock(&(master_image->lock));
427 /* Check to see if we found a resource */
428 if (allocated_image == NULL) {
429 printk(KERN_ERR "Can't find a suitable resource\n");
433 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
434 if (resource == NULL) {
435 printk(KERN_ERR "Unable to allocate resource structure\n");
438 resource->type = VME_MASTER;
439 resource->entry = &(allocated_image->list);
446 spin_lock(&(master_image->lock));
447 master_image->locked = 0;
448 spin_unlock(&(master_image->lock));
453 EXPORT_SYMBOL(vme_master_request);
455 int vme_master_set(struct vme_resource *resource, int enabled,
456 unsigned long long vme_base, unsigned long long size,
457 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
459 struct vme_bridge *bridge = find_bridge(resource);
460 struct vme_master_resource *image;
463 if (resource->type != VME_MASTER) {
464 printk(KERN_ERR "Not a master resource\n");
468 image = list_entry(resource->entry, struct vme_master_resource, list);
470 if (bridge->master_set == NULL) {
471 printk(KERN_WARNING "vme_master_set not supported\n");
475 if (!(((image->address_attr & aspace) == aspace) &&
476 ((image->cycle_attr & cycle) == cycle) &&
477 ((image->width_attr & dwidth) == dwidth))) {
478 printk(KERN_WARNING "Invalid attributes\n");
482 retval = vme_check_window(aspace, vme_base, size);
486 return bridge->master_set(image, enabled, vme_base, size, aspace,
489 EXPORT_SYMBOL(vme_master_set);
491 int vme_master_get(struct vme_resource *resource, int *enabled,
492 unsigned long long *vme_base, unsigned long long *size,
493 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
495 struct vme_bridge *bridge = find_bridge(resource);
496 struct vme_master_resource *image;
498 if (resource->type != VME_MASTER) {
499 printk(KERN_ERR "Not a master resource\n");
503 image = list_entry(resource->entry, struct vme_master_resource, list);
505 if (bridge->master_get == NULL) {
506 printk(KERN_WARNING "vme_master_set not supported\n");
510 return bridge->master_get(image, enabled, vme_base, size, aspace,
513 EXPORT_SYMBOL(vme_master_get);
516 * Read data out of VME space into a buffer.
518 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
521 struct vme_bridge *bridge = find_bridge(resource);
522 struct vme_master_resource *image;
525 if (bridge->master_read == NULL) {
526 printk(KERN_WARNING "Reading from resource not supported\n");
530 if (resource->type != VME_MASTER) {
531 printk(KERN_ERR "Not a master resource\n");
535 image = list_entry(resource->entry, struct vme_master_resource, list);
537 length = vme_get_size(resource);
539 if (offset > length) {
540 printk(KERN_WARNING "Invalid Offset\n");
544 if ((offset + count) > length)
545 count = length - offset;
547 return bridge->master_read(image, buf, count, offset);
550 EXPORT_SYMBOL(vme_master_read);
553 * Write data out to VME space from a buffer.
555 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
556 size_t count, loff_t offset)
558 struct vme_bridge *bridge = find_bridge(resource);
559 struct vme_master_resource *image;
562 if (bridge->master_write == NULL) {
563 printk(KERN_WARNING "Writing to resource not supported\n");
567 if (resource->type != VME_MASTER) {
568 printk(KERN_ERR "Not a master resource\n");
572 image = list_entry(resource->entry, struct vme_master_resource, list);
574 length = vme_get_size(resource);
576 if (offset > length) {
577 printk(KERN_WARNING "Invalid Offset\n");
581 if ((offset + count) > length)
582 count = length - offset;
584 return bridge->master_write(image, buf, count, offset);
586 EXPORT_SYMBOL(vme_master_write);
589 * Perform RMW cycle to provided location.
591 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
592 unsigned int compare, unsigned int swap, loff_t offset)
594 struct vme_bridge *bridge = find_bridge(resource);
595 struct vme_master_resource *image;
597 if (bridge->master_rmw == NULL) {
598 printk(KERN_WARNING "Writing to resource not supported\n");
602 if (resource->type != VME_MASTER) {
603 printk(KERN_ERR "Not a master resource\n");
607 image = list_entry(resource->entry, struct vme_master_resource, list);
609 return bridge->master_rmw(image, mask, compare, swap, offset);
611 EXPORT_SYMBOL(vme_master_rmw);
613 void vme_master_free(struct vme_resource *resource)
615 struct vme_master_resource *master_image;
617 if (resource->type != VME_MASTER) {
618 printk(KERN_ERR "Not a master resource\n");
622 master_image = list_entry(resource->entry, struct vme_master_resource,
624 if (master_image == NULL) {
625 printk(KERN_ERR "Can't find master resource\n");
630 spin_lock(&(master_image->lock));
631 if (master_image->locked == 0)
632 printk(KERN_ERR "Image is already free\n");
634 master_image->locked = 0;
635 spin_unlock(&(master_image->lock));
637 /* Free up resource memory */
640 EXPORT_SYMBOL(vme_master_free);
643 * Request a DMA controller with specific attributes, return some unique
646 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
648 struct vme_bridge *bridge;
649 struct list_head *dma_pos = NULL;
650 struct vme_dma_resource *allocated_ctrlr = NULL;
651 struct vme_dma_resource *dma_ctrlr = NULL;
652 struct vme_resource *resource = NULL;
654 /* XXX Not checking resource attributes */
655 printk(KERN_ERR "No VME resource Attribute tests done\n");
657 bridge = dev_to_bridge(dev);
658 if (bridge == NULL) {
659 printk(KERN_ERR "Can't find VME bus\n");
663 /* Loop through DMA resources */
664 list_for_each(dma_pos, &(bridge->dma_resources)) {
665 dma_ctrlr = list_entry(dma_pos,
666 struct vme_dma_resource, list);
668 if (dma_ctrlr == NULL) {
669 printk(KERN_ERR "Registered NULL DMA resource\n");
673 /* Find an unlocked and compatible controller */
674 mutex_lock(&(dma_ctrlr->mtx));
675 if (((dma_ctrlr->route_attr & route) == route) &&
676 (dma_ctrlr->locked == 0)) {
678 dma_ctrlr->locked = 1;
679 mutex_unlock(&(dma_ctrlr->mtx));
680 allocated_ctrlr = dma_ctrlr;
683 mutex_unlock(&(dma_ctrlr->mtx));
686 /* Check to see if we found a resource */
687 if (allocated_ctrlr == NULL)
690 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
691 if (resource == NULL) {
692 printk(KERN_WARNING "Unable to allocate resource structure\n");
695 resource->type = VME_DMA;
696 resource->entry = &(allocated_ctrlr->list);
702 mutex_lock(&(dma_ctrlr->mtx));
703 dma_ctrlr->locked = 0;
704 mutex_unlock(&(dma_ctrlr->mtx));
709 EXPORT_SYMBOL(vme_dma_request);
714 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
716 struct vme_dma_resource *ctrlr;
717 struct vme_dma_list *dma_list;
719 if (resource->type != VME_DMA) {
720 printk(KERN_ERR "Not a DMA resource\n");
724 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
726 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
727 if (dma_list == NULL) {
728 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
731 INIT_LIST_HEAD(&(dma_list->entries));
732 dma_list->parent = ctrlr;
733 mutex_init(&(dma_list->mtx));
737 EXPORT_SYMBOL(vme_new_dma_list);
740 * Create "Pattern" type attributes
742 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
745 struct vme_dma_attr *attributes;
746 struct vme_dma_pattern *pattern_attr;
748 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
749 if (attributes == NULL) {
750 printk(KERN_ERR "Unable to allocate memory for attributes "
755 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
756 if (pattern_attr == NULL) {
757 printk(KERN_ERR "Unable to allocate memory for pattern "
762 attributes->type = VME_DMA_PATTERN;
763 attributes->private = (void *)pattern_attr;
765 pattern_attr->pattern = pattern;
766 pattern_attr->type = type;
776 EXPORT_SYMBOL(vme_dma_pattern_attribute);
779 * Create "PCI" type attributes
781 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
783 struct vme_dma_attr *attributes;
784 struct vme_dma_pci *pci_attr;
786 /* XXX Run some sanity checks here */
788 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
789 if (attributes == NULL) {
790 printk(KERN_ERR "Unable to allocate memory for attributes "
795 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
796 if (pci_attr == NULL) {
797 printk(KERN_ERR "Unable to allocate memory for pci "
804 attributes->type = VME_DMA_PCI;
805 attributes->private = (void *)pci_attr;
807 pci_attr->address = address;
817 EXPORT_SYMBOL(vme_dma_pci_attribute);
820 * Create "VME" type attributes
822 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
823 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
825 struct vme_dma_attr *attributes;
826 struct vme_dma_vme *vme_attr;
828 attributes = kmalloc(
829 sizeof(struct vme_dma_attr), GFP_KERNEL);
830 if (attributes == NULL) {
831 printk(KERN_ERR "Unable to allocate memory for attributes "
836 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
837 if (vme_attr == NULL) {
838 printk(KERN_ERR "Unable to allocate memory for vme "
843 attributes->type = VME_DMA_VME;
844 attributes->private = (void *)vme_attr;
846 vme_attr->address = address;
847 vme_attr->aspace = aspace;
848 vme_attr->cycle = cycle;
849 vme_attr->dwidth = dwidth;
859 EXPORT_SYMBOL(vme_dma_vme_attribute);
864 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
866 kfree(attributes->private);
869 EXPORT_SYMBOL(vme_dma_free_attribute);
871 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
872 struct vme_dma_attr *dest, size_t count)
874 struct vme_bridge *bridge = list->parent->parent;
877 if (bridge->dma_list_add == NULL) {
878 printk(KERN_WARNING "Link List DMA generation not supported\n");
882 if (!mutex_trylock(&(list->mtx))) {
883 printk(KERN_ERR "Link List already submitted\n");
887 retval = bridge->dma_list_add(list, src, dest, count);
889 mutex_unlock(&(list->mtx));
893 EXPORT_SYMBOL(vme_dma_list_add);
895 int vme_dma_list_exec(struct vme_dma_list *list)
897 struct vme_bridge *bridge = list->parent->parent;
900 if (bridge->dma_list_exec == NULL) {
901 printk(KERN_ERR "Link List DMA execution not supported\n");
905 mutex_lock(&(list->mtx));
907 retval = bridge->dma_list_exec(list);
909 mutex_unlock(&(list->mtx));
913 EXPORT_SYMBOL(vme_dma_list_exec);
915 int vme_dma_list_free(struct vme_dma_list *list)
917 struct vme_bridge *bridge = list->parent->parent;
920 if (bridge->dma_list_empty == NULL) {
921 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
925 if (!mutex_trylock(&(list->mtx))) {
926 printk(KERN_ERR "Link List in use\n");
931 * Empty out all of the entries from the dma list. We need to go to the
932 * low level driver as dma entries are driver specific.
934 retval = bridge->dma_list_empty(list);
936 printk(KERN_ERR "Unable to empty link-list entries\n");
937 mutex_unlock(&(list->mtx));
940 mutex_unlock(&(list->mtx));
945 EXPORT_SYMBOL(vme_dma_list_free);
947 int vme_dma_free(struct vme_resource *resource)
949 struct vme_dma_resource *ctrlr;
951 if (resource->type != VME_DMA) {
952 printk(KERN_ERR "Not a DMA resource\n");
956 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
958 if (!mutex_trylock(&(ctrlr->mtx))) {
959 printk(KERN_ERR "Resource busy, can't free\n");
963 if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
964 printk(KERN_WARNING "Resource still processing transfers\n");
965 mutex_unlock(&(ctrlr->mtx));
971 mutex_unlock(&(ctrlr->mtx));
975 EXPORT_SYMBOL(vme_dma_free);
977 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
979 void (*call)(int, int, void *);
982 call = bridge->irq[level - 1].callback[statid].func;
983 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
986 call(level, statid, priv_data);
988 printk(KERN_WARNING "Spurilous VME interrupt, level:%x, "
989 "vector:%x\n", level, statid);
991 EXPORT_SYMBOL(vme_irq_handler);
993 int vme_irq_request(struct device *dev, int level, int statid,
994 void (*callback)(int, int, void *),
997 struct vme_bridge *bridge;
999 bridge = dev_to_bridge(dev);
1000 if (bridge == NULL) {
1001 printk(KERN_ERR "Can't find VME bus\n");
1005 if ((level < 1) || (level > 7)) {
1006 printk(KERN_ERR "Invalid interrupt level\n");
1010 if (bridge->irq_set == NULL) {
1011 printk(KERN_ERR "Configuring interrupts not supported\n");
1015 mutex_lock(&(bridge->irq_mtx));
1017 if (bridge->irq[level - 1].callback[statid].func) {
1018 mutex_unlock(&(bridge->irq_mtx));
1019 printk(KERN_WARNING "VME Interrupt already taken\n");
1023 bridge->irq[level - 1].count++;
1024 bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1025 bridge->irq[level - 1].callback[statid].func = callback;
1027 /* Enable IRQ level */
1028 bridge->irq_set(bridge, level, 1, 1);
1030 mutex_unlock(&(bridge->irq_mtx));
1034 EXPORT_SYMBOL(vme_irq_request);
1036 void vme_irq_free(struct device *dev, int level, int statid)
1038 struct vme_bridge *bridge;
1040 bridge = dev_to_bridge(dev);
1041 if (bridge == NULL) {
1042 printk(KERN_ERR "Can't find VME bus\n");
1046 if ((level < 1) || (level > 7)) {
1047 printk(KERN_ERR "Invalid interrupt level\n");
1051 if (bridge->irq_set == NULL) {
1052 printk(KERN_ERR "Configuring interrupts not supported\n");
1056 mutex_lock(&(bridge->irq_mtx));
1058 bridge->irq[level - 1].count--;
1060 /* Disable IRQ level if no more interrupts attached at this level*/
1061 if (bridge->irq[level - 1].count == 0)
1062 bridge->irq_set(bridge, level, 0, 1);
1064 bridge->irq[level - 1].callback[statid].func = NULL;
1065 bridge->irq[level - 1].callback[statid].priv_data = NULL;
1067 mutex_unlock(&(bridge->irq_mtx));
1069 EXPORT_SYMBOL(vme_irq_free);
1071 int vme_irq_generate(struct device *dev, int level, int statid)
1073 struct vme_bridge *bridge;
1075 bridge = dev_to_bridge(dev);
1076 if (bridge == NULL) {
1077 printk(KERN_ERR "Can't find VME bus\n");
1081 if ((level < 1) || (level > 7)) {
1082 printk(KERN_WARNING "Invalid interrupt level\n");
1086 if (bridge->irq_generate == NULL) {
1087 printk(KERN_WARNING "Interrupt generation not supported\n");
1091 return bridge->irq_generate(bridge, level, statid);
1093 EXPORT_SYMBOL(vme_irq_generate);
1096 * Request the location monitor, return resource or NULL
1098 struct vme_resource *vme_lm_request(struct device *dev)
1100 struct vme_bridge *bridge;
1101 struct list_head *lm_pos = NULL;
1102 struct vme_lm_resource *allocated_lm = NULL;
1103 struct vme_lm_resource *lm = NULL;
1104 struct vme_resource *resource = NULL;
1106 bridge = dev_to_bridge(dev);
1107 if (bridge == NULL) {
1108 printk(KERN_ERR "Can't find VME bus\n");
1112 /* Loop through DMA resources */
1113 list_for_each(lm_pos, &(bridge->lm_resources)) {
1114 lm = list_entry(lm_pos,
1115 struct vme_lm_resource, list);
1118 printk(KERN_ERR "Registered NULL Location Monitor "
1123 /* Find an unlocked controller */
1124 mutex_lock(&(lm->mtx));
1125 if (lm->locked == 0) {
1127 mutex_unlock(&(lm->mtx));
1131 mutex_unlock(&(lm->mtx));
1134 /* Check to see if we found a resource */
1135 if (allocated_lm == NULL)
1138 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1139 if (resource == NULL) {
1140 printk(KERN_ERR "Unable to allocate resource structure\n");
1143 resource->type = VME_LM;
1144 resource->entry = &(allocated_lm->list);
1150 mutex_lock(&(lm->mtx));
1152 mutex_unlock(&(lm->mtx));
1157 EXPORT_SYMBOL(vme_lm_request);
1159 int vme_lm_count(struct vme_resource *resource)
1161 struct vme_lm_resource *lm;
1163 if (resource->type != VME_LM) {
1164 printk(KERN_ERR "Not a Location Monitor resource\n");
1168 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1170 return lm->monitors;
1172 EXPORT_SYMBOL(vme_lm_count);
1174 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1175 vme_address_t aspace, vme_cycle_t cycle)
1177 struct vme_bridge *bridge = find_bridge(resource);
1178 struct vme_lm_resource *lm;
1180 if (resource->type != VME_LM) {
1181 printk(KERN_ERR "Not a Location Monitor resource\n");
1185 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1187 if (bridge->lm_set == NULL) {
1188 printk(KERN_ERR "vme_lm_set not supported\n");
1192 return bridge->lm_set(lm, lm_base, aspace, cycle);
1194 EXPORT_SYMBOL(vme_lm_set);
1196 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1197 vme_address_t *aspace, vme_cycle_t *cycle)
1199 struct vme_bridge *bridge = find_bridge(resource);
1200 struct vme_lm_resource *lm;
1202 if (resource->type != VME_LM) {
1203 printk(KERN_ERR "Not a Location Monitor resource\n");
1207 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1209 if (bridge->lm_get == NULL) {
1210 printk(KERN_ERR "vme_lm_get not supported\n");
1214 return bridge->lm_get(lm, lm_base, aspace, cycle);
1216 EXPORT_SYMBOL(vme_lm_get);
1218 int vme_lm_attach(struct vme_resource *resource, int monitor,
1219 void (*callback)(int))
1221 struct vme_bridge *bridge = find_bridge(resource);
1222 struct vme_lm_resource *lm;
1224 if (resource->type != VME_LM) {
1225 printk(KERN_ERR "Not a Location Monitor resource\n");
1229 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1231 if (bridge->lm_attach == NULL) {
1232 printk(KERN_ERR "vme_lm_attach not supported\n");
1236 return bridge->lm_attach(lm, monitor, callback);
1238 EXPORT_SYMBOL(vme_lm_attach);
1240 int vme_lm_detach(struct vme_resource *resource, int monitor)
1242 struct vme_bridge *bridge = find_bridge(resource);
1243 struct vme_lm_resource *lm;
1245 if (resource->type != VME_LM) {
1246 printk(KERN_ERR "Not a Location Monitor resource\n");
1250 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1252 if (bridge->lm_detach == NULL) {
1253 printk(KERN_ERR "vme_lm_detach not supported\n");
1257 return bridge->lm_detach(lm, monitor);
1259 EXPORT_SYMBOL(vme_lm_detach);
1261 void vme_lm_free(struct vme_resource *resource)
1263 struct vme_lm_resource *lm;
1265 if (resource->type != VME_LM) {
1266 printk(KERN_ERR "Not a Location Monitor resource\n");
1270 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1272 mutex_lock(&(lm->mtx));
1275 * Check to see that there aren't any callbacks still attached, if
1276 * there are we should probably be detaching them!
1281 mutex_unlock(&(lm->mtx));
1285 EXPORT_SYMBOL(vme_lm_free);
1287 int vme_slot_get(struct device *bus)
1289 struct vme_bridge *bridge;
1291 bridge = dev_to_bridge(bus);
1292 if (bridge == NULL) {
1293 printk(KERN_ERR "Can't find VME bus\n");
1297 if (bridge->slot_get == NULL) {
1298 printk(KERN_WARNING "vme_slot_get not supported\n");
1302 return bridge->slot_get(bridge);
1304 EXPORT_SYMBOL(vme_slot_get);
1307 /* - Bridge Registration --------------------------------------------------- */
1309 static int vme_alloc_bus_num(void)
1313 mutex_lock(&vme_bus_num_mtx);
1314 for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1315 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1316 vme_bus_numbers |= (0x1 << i);
1320 mutex_unlock(&vme_bus_num_mtx);
1325 static void vme_free_bus_num(int bus)
1327 mutex_lock(&vme_bus_num_mtx);
1328 vme_bus_numbers |= ~(0x1 << bus);
1329 mutex_unlock(&vme_bus_num_mtx);
1332 int vme_register_bridge(struct vme_bridge *bridge)
1338 bridge->num = vme_alloc_bus_num();
1340 /* This creates 32 vme "slot" devices. This equates to a slot for each
1341 * ID available in a system conforming to the ANSI/VITA 1-1994
1344 for (i = 0; i < VME_SLOTS_MAX; i++) {
1345 dev = &(bridge->dev[i]);
1346 memset(dev, 0, sizeof(struct device));
1348 dev->parent = bridge->parent;
1349 dev->bus = &(vme_bus_type);
1351 * We save a pointer to the bridge in platform_data so that we
1352 * can get to it later. We keep driver_data for use by the
1353 * driver that binds against the slot
1355 dev->platform_data = bridge;
1356 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1358 retval = device_register(dev);
1368 dev = &(bridge->dev[i]);
1369 device_unregister(dev);
1371 vme_free_bus_num(bridge->num);
1374 EXPORT_SYMBOL(vme_register_bridge);
1376 void vme_unregister_bridge(struct vme_bridge *bridge)
1382 for (i = 0; i < VME_SLOTS_MAX; i++) {
1383 dev = &(bridge->dev[i]);
1384 device_unregister(dev);
1386 vme_free_bus_num(bridge->num);
1388 EXPORT_SYMBOL(vme_unregister_bridge);
1391 /* - Driver Registration --------------------------------------------------- */
1393 int vme_register_driver(struct vme_driver *drv)
1395 drv->driver.name = drv->name;
1396 drv->driver.bus = &vme_bus_type;
1398 return driver_register(&drv->driver);
1400 EXPORT_SYMBOL(vme_register_driver);
1402 void vme_unregister_driver(struct vme_driver *drv)
1404 driver_unregister(&drv->driver);
1406 EXPORT_SYMBOL(vme_unregister_driver);
1408 /* - Bus Registration ------------------------------------------------------ */
1410 int vme_calc_slot(struct device *dev)
1412 struct vme_bridge *bridge;
1415 bridge = dev_to_bridge(dev);
1417 /* Determine slot number */
1419 while (num < VME_SLOTS_MAX) {
1420 if (&(bridge->dev[num]) == dev)
1425 if (num == VME_SLOTS_MAX) {
1426 dev_err(dev, "Failed to identify slot\n");
1436 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1438 if (dev->driver == NULL)
1439 printk(KERN_ERR "Bugger dev->driver is NULL\n");
1441 return container_of(dev->driver, struct vme_driver, driver);
1444 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1446 struct vme_bridge *bridge;
1447 struct vme_driver *driver;
1450 bridge = dev_to_bridge(dev);
1451 driver = container_of(drv, struct vme_driver, driver);
1453 num = vme_calc_slot(dev);
1457 if (driver->bind_table == NULL) {
1458 dev_err(dev, "Bind table NULL\n");
1463 while ((driver->bind_table[i].bus != 0) ||
1464 (driver->bind_table[i].slot != 0)) {
1466 if (bridge->num == driver->bind_table[i].bus) {
1467 if (num == driver->bind_table[i].slot)
1470 if (driver->bind_table[i].slot == VME_SLOT_ALL)
1473 if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1474 (num == vme_slot_get(dev)))
1485 static int vme_bus_probe(struct device *dev)
1487 struct vme_bridge *bridge;
1488 struct vme_driver *driver;
1489 int retval = -ENODEV;
1491 driver = dev_to_vme_driver(dev);
1492 bridge = dev_to_bridge(dev);
1494 if (driver->probe != NULL)
1495 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1500 static int vme_bus_remove(struct device *dev)
1502 struct vme_bridge *bridge;
1503 struct vme_driver *driver;
1504 int retval = -ENODEV;
1506 driver = dev_to_vme_driver(dev);
1507 bridge = dev_to_bridge(dev);
1509 if (driver->remove != NULL)
1510 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1515 struct bus_type vme_bus_type = {
1517 .match = vme_bus_match,
1518 .probe = vme_bus_probe,
1519 .remove = vme_bus_remove,
1521 EXPORT_SYMBOL(vme_bus_type);
1523 static int __init vme_init(void)
1525 return bus_register(&vme_bus_type);
1528 static void __exit vme_exit(void)
1530 bus_unregister(&vme_bus_type);
1533 MODULE_DESCRIPTION("VME bridge driver framework");
1534 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1535 MODULE_LICENSE("GPL");
1537 module_init(vme_init);
1538 module_exit(vme_exit);