drivers/staging/vme/vme.c
1 /*
2  * VME Bridge Framework
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32
33 #include "vme.h"
34 #include "vme_bridge.h"
35
36 /* Bitmask and mutex to keep track of bridge numbers */
37 static unsigned int vme_bus_numbers;
38 static DEFINE_MUTEX(vme_bus_num_mtx);
39
40 static void __exit vme_exit(void);
41 static int __init vme_init(void);
42
43
44 /*
45  * Find the bridge that the given device is associated with
46  */
47 static struct vme_bridge *dev_to_bridge(struct device *dev)
48 {
49         return dev->platform_data;
50 }
51
52 /*
53  * Find the bridge that the resource is associated with.
54  */
55 static struct vme_bridge *find_bridge(struct vme_resource *resource)
56 {
57         /* Get list to search */
58         switch (resource->type) {
59         case VME_MASTER:
60                 return list_entry(resource->entry, struct vme_master_resource,
61                         list)->parent;
62                 break;
63         case VME_SLAVE:
64                 return list_entry(resource->entry, struct vme_slave_resource,
65                         list)->parent;
66                 break;
67         case VME_DMA:
68                 return list_entry(resource->entry, struct vme_dma_resource,
69                         list)->parent;
70                 break;
71         case VME_LM:
72                 return list_entry(resource->entry, struct vme_lm_resource,
73                         list)->parent;
74                 break;
75         default:
76                 printk(KERN_ERR "Unknown resource type\n");
77                 return NULL;
78                 break;
79         }
80 }
81
82 /*
83  * Allocate a contiguous block of memory for use by the driver. This is used to
84  * create the buffers for the slave windows.
85  *
86  * XXX VME bridges could be available on buses other than PCI. At the moment
87  *     this framework only supports PCI devices.
88  */
89 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
90         dma_addr_t *dma)
91 {
92         struct vme_bridge *bridge;
93         struct pci_dev *pdev;
94
95         if (resource == NULL) {
96                 printk(KERN_ERR "No resource\n");
97                 return NULL;
98         }
99
100         bridge = find_bridge(resource);
101         if (bridge == NULL) {
102                 printk(KERN_ERR "Can't find bridge\n");
103                 return NULL;
104         }
105
106         /* Find pci_dev container of dev */
107         if (bridge->parent == NULL) {
108                 printk(KERN_ERR "Dev entry NULL\n");
109                 return NULL;
110         }
111         pdev = container_of(bridge->parent, struct pci_dev, dev);
112
113         return pci_alloc_consistent(pdev, size, dma);
114 }
115 EXPORT_SYMBOL(vme_alloc_consistent);
116
117 /*
118  * Free previously allocated contiguous block of memory.
119  *
120  * XXX VME bridges could be available on buses other than PCI. At the moment
121  *     this framework only supports PCI devices.
122  */
123 void vme_free_consistent(struct vme_resource *resource, size_t size,
124         void *vaddr, dma_addr_t dma)
125 {
126         struct vme_bridge *bridge;
127         struct pci_dev *pdev;
128
129         if (resource == NULL) {
130                 printk(KERN_ERR "No resource\n");
131                 return;
132         }
133
134         bridge = find_bridge(resource);
135         if (bridge == NULL) {
136                 printk(KERN_ERR "Can't find bridge\n");
137                 return;
138         }
139
140         /* Find pci_dev container of dev */
141         pdev = container_of(bridge->parent, struct pci_dev, dev);
142
143         pci_free_consistent(pdev, size, vaddr, dma);
144 }
145 EXPORT_SYMBOL(vme_free_consistent);
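/*
 * Illustrative sketch (not part of the framework API): a slave window driver
 * would typically pair these two calls, allocating its window buffer with
 * vme_alloc_consistent() and releasing it with vme_free_consistent().
 * "resource" is assumed to come from vme_slave_request() below and the 64KB
 * size is an arbitrary example; error handling is trimmed for brevity.
 *
 *      dma_addr_t buf_dma;
 *      void *buf;
 *
 *      buf = vme_alloc_consistent(resource, 0x10000, &buf_dma);
 *      if (buf == NULL)
 *              return -ENOMEM;
 *      ...
 *      vme_free_consistent(resource, 0x10000, buf, buf_dma);
 */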
146
147 size_t vme_get_size(struct vme_resource *resource)
148 {
149         int enabled, retval;
150         unsigned long long base, size;
151         dma_addr_t buf_base;
152         vme_address_t aspace;
153         vme_cycle_t cycle;
154         vme_width_t dwidth;
155
156         switch (resource->type) {
157         case VME_MASTER:
158                 retval = vme_master_get(resource, &enabled, &base, &size,
159                         &aspace, &cycle, &dwidth);
160
161                 return size;
162                 break;
163         case VME_SLAVE:
164                 retval = vme_slave_get(resource, &enabled, &base, &size,
165                         &buf_base, &aspace, &cycle);
166
167                 return size;
168                 break;
169         case VME_DMA:
170                 return 0;
171                 break;
172         default:
173                 printk(KERN_ERR "Unknown resource type\n");
174                 return 0;
175                 break;
176         }
177 }
178 EXPORT_SYMBOL(vme_get_size);
179
180 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
181         unsigned long long size)
182 {
183         int retval = 0;
184
185         switch (aspace) {
186         case VME_A16:
187                 if (((vme_base + size) > VME_A16_MAX) ||
188                                 (vme_base > VME_A16_MAX))
189                         retval = -EFAULT;
190                 break;
191         case VME_A24:
192                 if (((vme_base + size) > VME_A24_MAX) ||
193                                 (vme_base > VME_A24_MAX))
194                         retval = -EFAULT;
195                 break;
196         case VME_A32:
197                 if (((vme_base + size) > VME_A32_MAX) ||
198                                 (vme_base > VME_A32_MAX))
199                         retval = -EFAULT;
200                 break;
201         case VME_A64:
202                 /*
203                  * Any value held in an unsigned long long can be used as the
204                  * base
205                  */
206                 break;
207         case VME_CRCSR:
208                 if (((vme_base + size) > VME_CRCSR_MAX) ||
209                                 (vme_base > VME_CRCSR_MAX))
210                         retval = -EFAULT;
211                 break;
212         case VME_USER1:
213         case VME_USER2:
214         case VME_USER3:
215         case VME_USER4:
216                 /* User Defined */
217                 break;
218         default:
219                 printk(KERN_ERR "Invalid address space\n");
220                 retval = -EINVAL;
221                 break;
222         }
223
224         return retval;
225 }
226
227 /*
228  * Request a slave image with specific attributes, return some unique
229  * identifier.
230  */
231 struct vme_resource *vme_slave_request(struct device *dev,
232         vme_address_t address, vme_cycle_t cycle)
233 {
234         struct vme_bridge *bridge;
235         struct list_head *slave_pos = NULL;
236         struct vme_slave_resource *allocated_image = NULL;
237         struct vme_slave_resource *slave_image = NULL;
238         struct vme_resource *resource = NULL;
239
240         bridge = dev_to_bridge(dev);
241         if (bridge == NULL) {
242                 printk(KERN_ERR "Can't find VME bus\n");
243                 goto err_bus;
244         }
245
246         /* Loop through slave resources */
247         list_for_each(slave_pos, &(bridge->slave_resources)) {
248                 slave_image = list_entry(slave_pos,
249                         struct vme_slave_resource, list);
250
251                 if (slave_image == NULL) {
252                         printk(KERN_ERR "Registered NULL Slave resource\n");
253                         continue;
254                 }
255
256                 /* Find an unlocked and compatible image */
257                 mutex_lock(&(slave_image->mtx));
258                 if (((slave_image->address_attr & address) == address) &&
259                         ((slave_image->cycle_attr & cycle) == cycle) &&
260                         (slave_image->locked == 0)) {
261
262                         slave_image->locked = 1;
263                         mutex_unlock(&(slave_image->mtx));
264                         allocated_image = slave_image;
265                         break;
266                 }
267                 mutex_unlock(&(slave_image->mtx));
268         }
269
270         /* No free image */
271         if (allocated_image == NULL)
272                 goto err_image;
273
274         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
275         if (resource == NULL) {
276                 printk(KERN_WARNING "Unable to allocate resource structure\n");
277                 goto err_alloc;
278         }
279         resource->type = VME_SLAVE;
280         resource->entry = &(allocated_image->list);
281
282         return resource;
283
284 err_alloc:
285         /* Unlock image */
286         mutex_lock(&(slave_image->mtx));
287         slave_image->locked = 0;
288         mutex_unlock(&(slave_image->mtx));
289 err_image:
290 err_bus:
291         return NULL;
292 }
293 EXPORT_SYMBOL(vme_slave_request);
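/*
 * Illustrative sketch of the intended slave window life cycle (the attribute
 * flags, addresses and sizes are examples built from the vme.h definitions,
 * not values mandated by the framework; error handling is omitted):
 *
 *      struct vme_resource *res;
 *      dma_addr_t buf_dma;
 *      void *buf;
 *
 *      res = vme_slave_request(dev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *      buf = vme_alloc_consistent(res, 0x10000, &buf_dma);
 *      vme_slave_set(res, 1, 0x0, 0x10000, buf_dma, VME_A24,
 *              VME_SCT | VME_USER | VME_DATA);
 *      ...
 *      vme_slave_set(res, 0, 0x0, 0x10000, buf_dma, VME_A24,
 *              VME_SCT | VME_USER | VME_DATA);
 *      vme_free_consistent(res, 0x10000, buf, buf_dma);
 *      vme_slave_free(res);
 */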
294
295 int vme_slave_set(struct vme_resource *resource, int enabled,
296         unsigned long long vme_base, unsigned long long size,
297         dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
298 {
299         struct vme_bridge *bridge = find_bridge(resource);
300         struct vme_slave_resource *image;
301         int retval;
302
303         if (resource->type != VME_SLAVE) {
304                 printk(KERN_ERR "Not a slave resource\n");
305                 return -EINVAL;
306         }
307
308         image = list_entry(resource->entry, struct vme_slave_resource, list);
309
310         if (bridge->slave_set == NULL) {
311                 printk(KERN_ERR "Function not supported\n");
312                 return -ENOSYS;
313         }
314
315         if (!(((image->address_attr & aspace) == aspace) &&
316                 ((image->cycle_attr & cycle) == cycle))) {
317                 printk(KERN_ERR "Invalid attributes\n");
318                 return -EINVAL;
319         }
320
321         retval = vme_check_window(aspace, vme_base, size);
322         if (retval)
323                 return retval;
324
325         return bridge->slave_set(image, enabled, vme_base, size, buf_base,
326                 aspace, cycle);
327 }
328 EXPORT_SYMBOL(vme_slave_set);
329
330 int vme_slave_get(struct vme_resource *resource, int *enabled,
331         unsigned long long *vme_base, unsigned long long *size,
332         dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
333 {
334         struct vme_bridge *bridge = find_bridge(resource);
335         struct vme_slave_resource *image;
336
337         if (resource->type != VME_SLAVE) {
338                 printk(KERN_ERR "Not a slave resource\n");
339                 return -EINVAL;
340         }
341
342         image = list_entry(resource->entry, struct vme_slave_resource, list);
343
344         if (bridge->slave_get == NULL) {
345                 printk(KERN_ERR "vme_slave_get not supported\n");
346                 return -EINVAL;
347         }
348
349         return bridge->slave_get(image, enabled, vme_base, size, buf_base,
350                 aspace, cycle);
351 }
352 EXPORT_SYMBOL(vme_slave_get);
353
354 void vme_slave_free(struct vme_resource *resource)
355 {
356         struct vme_slave_resource *slave_image;
357
358         if (resource->type != VME_SLAVE) {
359                 printk(KERN_ERR "Not a slave resource\n");
360                 return;
361         }
362
363         slave_image = list_entry(resource->entry, struct vme_slave_resource,
364                 list);
365         if (slave_image == NULL) {
366                 printk(KERN_ERR "Can't find slave resource\n");
367                 return;
368         }
369
370         /* Unlock image */
371         mutex_lock(&(slave_image->mtx));
372         if (slave_image->locked == 0)
373                 printk(KERN_ERR "Image is already free\n");
374
375         slave_image->locked = 0;
376         mutex_unlock(&(slave_image->mtx));
377
378         /* Free up resource memory */
379         kfree(resource);
380 }
381 EXPORT_SYMBOL(vme_slave_free);
382
383 /*
384  * Request a master image with specific attributes, return some unique
385  * identifier.
386  */
387 struct vme_resource *vme_master_request(struct device *dev,
388         vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
389 {
390         struct vme_bridge *bridge;
391         struct list_head *master_pos = NULL;
392         struct vme_master_resource *allocated_image = NULL;
393         struct vme_master_resource *master_image = NULL;
394         struct vme_resource *resource = NULL;
395
396         bridge = dev_to_bridge(dev);
397         if (bridge == NULL) {
398                 printk(KERN_ERR "Can't find VME bus\n");
399                 goto err_bus;
400         }
401
402         /* Loop through master resources */
403         list_for_each(master_pos, &(bridge->master_resources)) {
404                 master_image = list_entry(master_pos,
405                         struct vme_master_resource, list);
406
407                 if (master_image == NULL) {
408                         printk(KERN_WARNING "Registered NULL master resource\n");
409                         continue;
410                 }
411
412                 /* Find an unlocked and compatible image */
413                 spin_lock(&(master_image->lock));
414                 if (((master_image->address_attr & address) == address) &&
415                         ((master_image->cycle_attr & cycle) == cycle) &&
416                         ((master_image->width_attr & dwidth) == dwidth) &&
417                         (master_image->locked == 0)) {
418
419                         master_image->locked = 1;
420                         spin_unlock(&(master_image->lock));
421                         allocated_image = master_image;
422                         break;
423                 }
424                 spin_unlock(&(master_image->lock));
425         }
426
427         /* Check to see if we found a resource */
428         if (allocated_image == NULL) {
429                 printk(KERN_ERR "Can't find a suitable resource\n");
430                 goto err_image;
431         }
432
433         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
434         if (resource == NULL) {
435                 printk(KERN_ERR "Unable to allocate resource structure\n");
436                 goto err_alloc;
437         }
438         resource->type = VME_MASTER;
439         resource->entry = &(allocated_image->list);
440
441         return resource;
442
444 err_alloc:
445         /* Unlock image */
446         spin_lock(&(master_image->lock));
447         master_image->locked = 0;
448         spin_unlock(&(master_image->lock));
449 err_image:
450 err_bus:
451         return NULL;
452 }
453 EXPORT_SYMBOL(vme_master_request);
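/*
 * Illustrative sketch of requesting and using a master window (base address,
 * size and attribute flags are examples only; error handling is omitted):
 *
 *      struct vme_resource *res;
 *      u32 data;
 *
 *      res = vme_master_request(dev, VME_A32,
 *              VME_SCT | VME_USER | VME_DATA, VME_D32);
 *      vme_master_set(res, 1, 0x80000000, 0x10000, VME_A32,
 *              VME_SCT | VME_USER | VME_DATA, VME_D32);
 *      vme_master_read(res, &data, 4, 0x0);
 *      ...
 *      vme_master_free(res);
 */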
454
455 int vme_master_set(struct vme_resource *resource, int enabled,
456         unsigned long long vme_base, unsigned long long size,
457         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
458 {
459         struct vme_bridge *bridge = find_bridge(resource);
460         struct vme_master_resource *image;
461         int retval;
462
463         if (resource->type != VME_MASTER) {
464                 printk(KERN_ERR "Not a master resource\n");
465                 return -EINVAL;
466         }
467
468         image = list_entry(resource->entry, struct vme_master_resource, list);
469
470         if (bridge->master_set == NULL) {
471                 printk(KERN_WARNING "vme_master_set not supported\n");
472                 return -EINVAL;
473         }
474
475         if (!(((image->address_attr & aspace) == aspace) &&
476                 ((image->cycle_attr & cycle) == cycle) &&
477                 ((image->width_attr & dwidth) == dwidth))) {
478                 printk(KERN_WARNING "Invalid attributes\n");
479                 return -EINVAL;
480         }
481
482         retval = vme_check_window(aspace, vme_base, size);
483         if (retval)
484                 return retval;
485
486         return bridge->master_set(image, enabled, vme_base, size, aspace,
487                 cycle, dwidth);
488 }
489 EXPORT_SYMBOL(vme_master_set);
490
491 int vme_master_get(struct vme_resource *resource, int *enabled,
492         unsigned long long *vme_base, unsigned long long *size,
493         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
494 {
495         struct vme_bridge *bridge = find_bridge(resource);
496         struct vme_master_resource *image;
497
498         if (resource->type != VME_MASTER) {
499                 printk(KERN_ERR "Not a master resource\n");
500                 return -EINVAL;
501         }
502
503         image = list_entry(resource->entry, struct vme_master_resource, list);
504
505         if (bridge->master_get == NULL) {
506                 printk(KERN_WARNING "vme_master_get not supported\n");
507                 return -EINVAL;
508         }
509
510         return bridge->master_get(image, enabled, vme_base, size, aspace,
511                 cycle, dwidth);
512 }
513 EXPORT_SYMBOL(vme_master_get);
514
515 /*
516  * Read data out of VME space into a buffer.
517  */
518 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
519         loff_t offset)
520 {
521         struct vme_bridge *bridge = find_bridge(resource);
522         struct vme_master_resource *image;
523         size_t length;
524
525         if (bridge->master_read == NULL) {
526                 printk(KERN_WARNING "Reading from resource not supported\n");
527                 return -EINVAL;
528         }
529
530         if (resource->type != VME_MASTER) {
531                 printk(KERN_ERR "Not a master resource\n");
532                 return -EINVAL;
533         }
534
535         image = list_entry(resource->entry, struct vme_master_resource, list);
536
537         length = vme_get_size(resource);
538
539         if (offset > length) {
540                 printk(KERN_WARNING "Invalid Offset\n");
541                 return -EFAULT;
542         }
543
544         if ((offset + count) > length)
545                 count = length - offset;
546
547         return bridge->master_read(image, buf, count, offset);
548
549 }
550 EXPORT_SYMBOL(vme_master_read);
551
552 /*
553  * Write data out to VME space from a buffer.
554  */
555 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
556         size_t count, loff_t offset)
557 {
558         struct vme_bridge *bridge = find_bridge(resource);
559         struct vme_master_resource *image;
560         size_t length;
561
562         if (bridge->master_write == NULL) {
563                 printk(KERN_WARNING "Writing to resource not supported\n");
564                 return -EINVAL;
565         }
566
567         if (resource->type != VME_MASTER) {
568                 printk(KERN_ERR "Not a master resource\n");
569                 return -EINVAL;
570         }
571
572         image = list_entry(resource->entry, struct vme_master_resource, list);
573
574         length = vme_get_size(resource);
575
576         if (offset > length) {
577                 printk(KERN_WARNING "Invalid Offset\n");
578                 return -EFAULT;
579         }
580
581         if ((offset + count) > length)
582                 count = length - offset;
583
584         return bridge->master_write(image, buf, count, offset);
585 }
586 EXPORT_SYMBOL(vme_master_write);
587
588 /*
589  * Perform RMW cycle to provided location.
590  */
591 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
592         unsigned int compare, unsigned int swap, loff_t offset)
593 {
594         struct vme_bridge *bridge = find_bridge(resource);
595         struct vme_master_resource *image;
596
597         if (bridge->master_rmw == NULL) {
598                 printk(KERN_WARNING "RMW operations not supported\n");
599                 return -EINVAL;
600         }
601
602         if (resource->type != VME_MASTER) {
603                 printk(KERN_ERR "Not a master resource\n");
604                 return -EINVAL;
605         }
606
607         image = list_entry(resource->entry, struct vme_master_resource, list);
608
609         return bridge->master_rmw(image, mask, compare, swap, offset);
610 }
611 EXPORT_SYMBOL(vme_master_rmw);
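/*
 * Sketch of an RMW cycle on an enabled master window: bits selected by "mask"
 * are read, compared against "compare" and, on a match, replaced with the
 * corresponding bits of "swap" (the exact semantics are bridge specific).
 * The example below attempts a simple test-and-set of bit 0 at offset 0:
 *
 *      vme_master_rmw(res, 0x1, 0x0, 0x1, 0x0);
 */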
612
613 void vme_master_free(struct vme_resource *resource)
614 {
615         struct vme_master_resource *master_image;
616
617         if (resource->type != VME_MASTER) {
618                 printk(KERN_ERR "Not a master resource\n");
619                 return;
620         }
621
622         master_image = list_entry(resource->entry, struct vme_master_resource,
623                 list);
624         if (master_image == NULL) {
625                 printk(KERN_ERR "Can't find master resource\n");
626                 return;
627         }
628
629         /* Unlock image */
630         spin_lock(&(master_image->lock));
631         if (master_image->locked == 0)
632                 printk(KERN_ERR "Image is already free\n");
633
634         master_image->locked = 0;
635         spin_unlock(&(master_image->lock));
636
637         /* Free up resource memory */
638         kfree(resource);
639 }
640 EXPORT_SYMBOL(vme_master_free);
641
642 /*
643  * Request a DMA controller with specific attributes, return some unique
644  * identifier.
645  */
646 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
647 {
648         struct vme_bridge *bridge;
649         struct list_head *dma_pos = NULL;
650         struct vme_dma_resource *allocated_ctrlr = NULL;
651         struct vme_dma_resource *dma_ctrlr = NULL;
652         struct vme_resource *resource = NULL;
653
654         /* XXX Not checking resource attributes */
655         printk(KERN_ERR "No VME resource attribute tests done\n");
656
657         bridge = dev_to_bridge(dev);
658         if (bridge == NULL) {
659                 printk(KERN_ERR "Can't find VME bus\n");
660                 goto err_bus;
661         }
662
663         /* Loop through DMA resources */
664         list_for_each(dma_pos, &(bridge->dma_resources)) {
665                 dma_ctrlr = list_entry(dma_pos,
666                         struct vme_dma_resource, list);
667
668                 if (dma_ctrlr == NULL) {
669                         printk(KERN_ERR "Registered NULL DMA resource\n");
670                         continue;
671                 }
672
673                 /* Find an unlocked and compatible controller */
674                 mutex_lock(&(dma_ctrlr->mtx));
675                 if (((dma_ctrlr->route_attr & route) == route) &&
676                         (dma_ctrlr->locked == 0)) {
677
678                         dma_ctrlr->locked = 1;
679                         mutex_unlock(&(dma_ctrlr->mtx));
680                         allocated_ctrlr = dma_ctrlr;
681                         break;
682                 }
683                 mutex_unlock(&(dma_ctrlr->mtx));
684         }
685
686         /* Check to see if we found a resource */
687         if (allocated_ctrlr == NULL)
688                 goto err_ctrlr;
689
690         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
691         if (resource == NULL) {
692                 printk(KERN_WARNING "Unable to allocate resource structure\n");
693                 goto err_alloc;
694         }
695         resource->type = VME_DMA;
696         resource->entry = &(allocated_ctrlr->list);
697
698         return resource;
699
700 err_alloc:
701         /* Unlock image */
702         mutex_lock(&(dma_ctrlr->mtx));
703         dma_ctrlr->locked = 0;
704         mutex_unlock(&(dma_ctrlr->mtx));
705 err_ctrlr:
706 err_bus:
707         return NULL;
708 }
709 EXPORT_SYMBOL(vme_dma_request);
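/*
 * Illustrative sketch of a memory-to-VME transfer using the link-list DMA API
 * below. "buf_dma" is assumed to be a previously allocated PCI/DMA address
 * and the VME address and attributes are examples only; error handling is
 * omitted:
 *
 *      struct vme_resource *res;
 *      struct vme_dma_list *list;
 *      struct vme_dma_attr *src, *dest;
 *
 *      res = vme_dma_request(dev, VME_DMA_MEM_TO_VME);
 *      list = vme_new_dma_list(res);
 *      src = vme_dma_pci_attribute(buf_dma);
 *      dest = vme_dma_vme_attribute(0x80000000, VME_A32,
 *              VME_SCT | VME_USER | VME_DATA, VME_D32);
 *      vme_dma_list_add(list, src, dest, 0x10000);
 *      vme_dma_list_exec(list);
 *      vme_dma_free_attribute(src);
 *      vme_dma_free_attribute(dest);
 *      vme_dma_list_free(list);
 *      vme_dma_free(res);
 */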
710
711 /*
712  * Start new list
713  */
714 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
715 {
716         struct vme_dma_resource *ctrlr;
717         struct vme_dma_list *dma_list;
718
719         if (resource->type != VME_DMA) {
720                 printk(KERN_ERR "Not a DMA resource\n");
721                 return NULL;
722         }
723
724         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
725
726         dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
727         if (dma_list == NULL) {
728                 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
729                 return NULL;
730         }
731         INIT_LIST_HEAD(&(dma_list->entries));
732         dma_list->parent = ctrlr;
733         mutex_init(&(dma_list->mtx));
734
735         return dma_list;
736 }
737 EXPORT_SYMBOL(vme_new_dma_list);
738
739 /*
740  * Create "Pattern" type attributes
741  */
742 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
743         vme_pattern_t type)
744 {
745         struct vme_dma_attr *attributes;
746         struct vme_dma_pattern *pattern_attr;
747
748         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
749         if (attributes == NULL) {
750                 printk(KERN_ERR "Unable to allocate memory for attributes "
751                         "structure\n");
752                 goto err_attr;
753         }
754
755         pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
756         if (pattern_attr == NULL) {
757                 printk(KERN_ERR "Unable to allocate memory for pattern "
758                         "attributes\n");
759                 goto err_pat;
760         }
761
762         attributes->type = VME_DMA_PATTERN;
763         attributes->private = (void *)pattern_attr;
764
765         pattern_attr->pattern = pattern;
766         pattern_attr->type = type;
767
768         return attributes;
769
771 err_pat:
772         kfree(attributes);
773 err_attr:
774         return NULL;
775 }
776 EXPORT_SYMBOL(vme_dma_pattern_attribute);
777
778 /*
779  * Create "PCI" type attributes
780  */
781 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
782 {
783         struct vme_dma_attr *attributes;
784         struct vme_dma_pci *pci_attr;
785
786         /* XXX Run some sanity checks here */
787
788         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
789         if (attributes == NULL) {
790                 printk(KERN_ERR "Unable to allocate memory for attributes "
791                         "structure\n");
792                 goto err_attr;
793         }
794
795         pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
796         if (pci_attr == NULL) {
797                 printk(KERN_ERR "Unable to allocate memory for pci "
798                         "attributes\n");
799                 goto err_pci;
800         }
801
802
804         attributes->type = VME_DMA_PCI;
805         attributes->private = (void *)pci_attr;
806
807         pci_attr->address = address;
808
809         return attributes;
810
812 err_pci:
813         kfree(attributes);
814 err_attr:
815         return NULL;
816 }
817 EXPORT_SYMBOL(vme_dma_pci_attribute);
818
819 /*
820  * Create "VME" type attributes
821  */
822 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
823         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
824 {
825         struct vme_dma_attr *attributes;
826         struct vme_dma_vme *vme_attr;
827
828         attributes = kmalloc(
829                 sizeof(struct vme_dma_attr), GFP_KERNEL);
830         if (attributes == NULL) {
831                 printk(KERN_ERR "Unable to allocate memory for attributes "
832                         "structure\n");
833                 goto err_attr;
834         }
835
836         vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
837         if (vme_attr == NULL) {
838                 printk(KERN_ERR "Unable to allocate memory for vme "
839                         "attributes\n");
840                 goto err_vme;
841         }
842
843         attributes->type = VME_DMA_VME;
844         attributes->private = (void *)vme_attr;
845
846         vme_attr->address = address;
847         vme_attr->aspace = aspace;
848         vme_attr->cycle = cycle;
849         vme_attr->dwidth = dwidth;
850
851         return attributes;
852
854 err_vme:
855         kfree(attributes);
856 err_attr:
857         return NULL;
858 }
859 EXPORT_SYMBOL(vme_dma_vme_attribute);
860
861 /*
862  * Free attribute
863  */
864 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
865 {
866         kfree(attributes->private);
867         kfree(attributes);
868 }
869 EXPORT_SYMBOL(vme_dma_free_attribute);
870
871 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
872         struct vme_dma_attr *dest, size_t count)
873 {
874         struct vme_bridge *bridge = list->parent->parent;
875         int retval;
876
877         if (bridge->dma_list_add == NULL) {
878                 printk(KERN_WARNING "Link List DMA generation not supported\n");
879                 return -EINVAL;
880         }
881
882         if (!mutex_trylock(&(list->mtx))) {
883                 printk(KERN_ERR "Link List already submitted\n");
884                 return -EINVAL;
885         }
886
887         retval = bridge->dma_list_add(list, src, dest, count);
888
889         mutex_unlock(&(list->mtx));
890
891         return retval;
892 }
893 EXPORT_SYMBOL(vme_dma_list_add);
894
895 int vme_dma_list_exec(struct vme_dma_list *list)
896 {
897         struct vme_bridge *bridge = list->parent->parent;
898         int retval;
899
900         if (bridge->dma_list_exec == NULL) {
901                 printk(KERN_ERR "Link List DMA execution not supported\n");
902                 return -EINVAL;
903         }
904
905         mutex_lock(&(list->mtx));
906
907         retval = bridge->dma_list_exec(list);
908
909         mutex_unlock(&(list->mtx));
910
911         return retval;
912 }
913 EXPORT_SYMBOL(vme_dma_list_exec);
914
915 int vme_dma_list_free(struct vme_dma_list *list)
916 {
917         struct vme_bridge *bridge = list->parent->parent;
918         int retval;
919
920         if (bridge->dma_list_empty == NULL) {
921                 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
922                 return -EINVAL;
923         }
924
925         if (!mutex_trylock(&(list->mtx))) {
926                 printk(KERN_ERR "Link List in use\n");
927                 return -EINVAL;
928         }
929
930         /*
931          * Empty out all of the entries from the dma list. We need to go to the
932          * low level driver as dma entries are driver specific.
933          */
934         retval = bridge->dma_list_empty(list);
935         if (retval) {
936                 printk(KERN_ERR "Unable to empty link-list entries\n");
937                 mutex_unlock(&(list->mtx));
938                 return retval;
939         }
940         mutex_unlock(&(list->mtx));
941         kfree(list);
942
943         return retval;
944 }
945 EXPORT_SYMBOL(vme_dma_list_free);
946
947 int vme_dma_free(struct vme_resource *resource)
948 {
949         struct vme_dma_resource *ctrlr;
950
951         if (resource->type != VME_DMA) {
952                 printk(KERN_ERR "Not a DMA resource\n");
953                 return -EINVAL;
954         }
955
956         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
957
958         if (!mutex_trylock(&(ctrlr->mtx))) {
959                 printk(KERN_ERR "Resource busy, can't free\n");
960                 return -EBUSY;
961         }
962
963         if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
964                 printk(KERN_WARNING "Resource still processing transfers\n");
965                 mutex_unlock(&(ctrlr->mtx));
966                 return -EBUSY;
967         }
968
969         ctrlr->locked = 0;
970
971         mutex_unlock(&(ctrlr->mtx));
972
973         return 0;
974 }
975 EXPORT_SYMBOL(vme_dma_free);
976
977 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
978 {
979         void (*call)(int, int, void *);
980         void *priv_data;
981
982         call = bridge->irq[level - 1].callback[statid].func;
983         priv_data = bridge->irq[level - 1].callback[statid].priv_data;
984
985         if (call != NULL)
986                 call(level, statid, priv_data);
987         else
988                 printk(KERN_WARNING "Spurious VME interrupt, level:%x, "
989                         "vector:%x\n", level, statid);
990 }
991 EXPORT_SYMBOL(vme_irq_handler);
992
993 int vme_irq_request(struct device *dev, int level, int statid,
994         void (*callback)(int, int, void *),
995         void *priv_data)
996 {
997         struct vme_bridge *bridge;
998
999         bridge = dev_to_bridge(dev);
1000         if (bridge == NULL) {
1001                 printk(KERN_ERR "Can't find VME bus\n");
1002                 return -EINVAL;
1003         }
1004
1005         if ((level < 1) || (level > 7)) {
1006                 printk(KERN_ERR "Invalid interrupt level\n");
1007                 return -EINVAL;
1008         }
1009
1010         if (bridge->irq_set == NULL) {
1011                 printk(KERN_ERR "Configuring interrupts not supported\n");
1012                 return -EINVAL;
1013         }
1014
1015         mutex_lock(&(bridge->irq_mtx));
1016
1017         if (bridge->irq[level - 1].callback[statid].func) {
1018                 mutex_unlock(&(bridge->irq_mtx));
1019                 printk(KERN_WARNING "VME Interrupt already taken\n");
1020                 return -EBUSY;
1021         }
1022
1023         bridge->irq[level - 1].count++;
1024         bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1025         bridge->irq[level - 1].callback[statid].func = callback;
1026
1027         /* Enable IRQ level */
1028         bridge->irq_set(bridge, level, 1, 1);
1029
1030         mutex_unlock(&(bridge->irq_mtx));
1031
1032         return 0;
1033 }
1034 EXPORT_SYMBOL(vme_irq_request);
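/*
 * Illustrative sketch of attaching and releasing a handler for VME interrupt
 * level 3, vector 0x20 (level, vector and handler name are examples only):
 *
 *      static void example_vme_isr(int level, int statid, void *priv)
 *      {
 *              printk(KERN_INFO "VME interrupt: level %d, vector %x\n",
 *                      level, statid);
 *      }
 *
 *      ...
 *      vme_irq_request(dev, 3, 0x20, example_vme_isr, NULL);
 *      ...
 *      vme_irq_free(dev, 3, 0x20);
 */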
1035
1036 void vme_irq_free(struct device *dev, int level, int statid)
1037 {
1038         struct vme_bridge *bridge;
1039
1040         bridge = dev_to_bridge(dev);
1041         if (bridge == NULL) {
1042                 printk(KERN_ERR "Can't find VME bus\n");
1043                 return;
1044         }
1045
1046         if ((level < 1) || (level > 7)) {
1047                 printk(KERN_ERR "Invalid interrupt level\n");
1048                 return;
1049         }
1050
1051         if (bridge->irq_set == NULL) {
1052                 printk(KERN_ERR "Configuring interrupts not supported\n");
1053                 return;
1054         }
1055
1056         mutex_lock(&(bridge->irq_mtx));
1057
1058         bridge->irq[level - 1].count--;
1059
1060         /* Disable IRQ level if no more interrupts attached at this level */
1061         if (bridge->irq[level - 1].count == 0)
1062                 bridge->irq_set(bridge, level, 0, 1);
1063
1064         bridge->irq[level - 1].callback[statid].func = NULL;
1065         bridge->irq[level - 1].callback[statid].priv_data = NULL;
1066
1067         mutex_unlock(&(bridge->irq_mtx));
1068 }
1069 EXPORT_SYMBOL(vme_irq_free);
1070
1071 int vme_irq_generate(struct device *dev, int level, int statid)
1072 {
1073         struct vme_bridge *bridge;
1074
1075         bridge = dev_to_bridge(dev);
1076         if (bridge == NULL) {
1077                 printk(KERN_ERR "Can't find VME bus\n");
1078                 return -EINVAL;
1079         }
1080
1081         if ((level < 1) || (level > 7)) {
1082                 printk(KERN_WARNING "Invalid interrupt level\n");
1083                 return -EINVAL;
1084         }
1085
1086         if (bridge->irq_generate == NULL) {
1087                 printk(KERN_WARNING "Interrupt generation not supported\n");
1088                 return -EINVAL;
1089         }
1090
1091         return bridge->irq_generate(bridge, level, statid);
1092 }
1093 EXPORT_SYMBOL(vme_irq_generate);
1094
1095 /*
1096  * Request the location monitor, return resource or NULL
1097  */
1098 struct vme_resource *vme_lm_request(struct device *dev)
1099 {
1100         struct vme_bridge *bridge;
1101         struct list_head *lm_pos = NULL;
1102         struct vme_lm_resource *allocated_lm = NULL;
1103         struct vme_lm_resource *lm = NULL;
1104         struct vme_resource *resource = NULL;
1105
1106         bridge = dev_to_bridge(dev);
1107         if (bridge == NULL) {
1108                 printk(KERN_ERR "Can't find VME bus\n");
1109                 goto err_bus;
1110         }
1111
1112         /* Loop through location monitor resources */
1113         list_for_each(lm_pos, &(bridge->lm_resources)) {
1114                 lm = list_entry(lm_pos,
1115                         struct vme_lm_resource, list);
1116
1117                 if (lm == NULL) {
1118                         printk(KERN_ERR "Registered NULL Location Monitor "
1119                                 "resource\n");
1120                         continue;
1121                 }
1122
1123                 /* Find an unlocked controller */
1124                 mutex_lock(&(lm->mtx));
1125                 if (lm->locked == 0) {
1126                         lm->locked = 1;
1127                         mutex_unlock(&(lm->mtx));
1128                         allocated_lm = lm;
1129                         break;
1130                 }
1131                 mutex_unlock(&(lm->mtx));
1132         }
1133
1134         /* Check to see if we found a resource */
1135         if (allocated_lm == NULL)
1136                 goto err_lm;
1137
1138         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1139         if (resource == NULL) {
1140                 printk(KERN_ERR "Unable to allocate resource structure\n");
1141                 goto err_alloc;
1142         }
1143         resource->type = VME_LM;
1144         resource->entry = &(allocated_lm->list);
1145
1146         return resource;
1147
1148 err_alloc:
1149         /* Unlock image */
1150         mutex_lock(&(lm->mtx));
1151         lm->locked = 0;
1152         mutex_unlock(&(lm->mtx));
1153 err_lm:
1154 err_bus:
1155         return NULL;
1156 }
1157 EXPORT_SYMBOL(vme_lm_request);
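/*
 * Illustrative sketch of programming the location monitor and attaching a
 * callback to its first monitored address (the address, attributes and the
 * example_lm_callback handler are hypothetical):
 *
 *      struct vme_resource *res;
 *
 *      res = vme_lm_request(dev);
 *      vme_lm_set(res, 0x80000000, VME_A32, VME_SCT | VME_USER | VME_DATA);
 *      vme_lm_attach(res, 0, example_lm_callback);
 *      ...
 *      vme_lm_detach(res, 0);
 *      vme_lm_free(res);
 */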
1158
1159 int vme_lm_count(struct vme_resource *resource)
1160 {
1161         struct vme_lm_resource *lm;
1162
1163         if (resource->type != VME_LM) {
1164                 printk(KERN_ERR "Not a Location Monitor resource\n");
1165                 return -EINVAL;
1166         }
1167
1168         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1169
1170         return lm->monitors;
1171 }
1172 EXPORT_SYMBOL(vme_lm_count);
1173
1174 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1175         vme_address_t aspace, vme_cycle_t cycle)
1176 {
1177         struct vme_bridge *bridge = find_bridge(resource);
1178         struct vme_lm_resource *lm;
1179
1180         if (resource->type != VME_LM) {
1181                 printk(KERN_ERR "Not a Location Monitor resource\n");
1182                 return -EINVAL;
1183         }
1184
1185         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1186
1187         if (bridge->lm_set == NULL) {
1188                 printk(KERN_ERR "vme_lm_set not supported\n");
1189                 return -EINVAL;
1190         }
1191
1192         return bridge->lm_set(lm, lm_base, aspace, cycle);
1193 }
1194 EXPORT_SYMBOL(vme_lm_set);
1195
1196 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1197         vme_address_t *aspace, vme_cycle_t *cycle)
1198 {
1199         struct vme_bridge *bridge = find_bridge(resource);
1200         struct vme_lm_resource *lm;
1201
1202         if (resource->type != VME_LM) {
1203                 printk(KERN_ERR "Not a Location Monitor resource\n");
1204                 return -EINVAL;
1205         }
1206
1207         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1208
1209         if (bridge->lm_get == NULL) {
1210                 printk(KERN_ERR "vme_lm_get not supported\n");
1211                 return -EINVAL;
1212         }
1213
1214         return bridge->lm_get(lm, lm_base, aspace, cycle);
1215 }
1216 EXPORT_SYMBOL(vme_lm_get);
1217
1218 int vme_lm_attach(struct vme_resource *resource, int monitor,
1219         void (*callback)(int))
1220 {
1221         struct vme_bridge *bridge = find_bridge(resource);
1222         struct vme_lm_resource *lm;
1223
1224         if (resource->type != VME_LM) {
1225                 printk(KERN_ERR "Not a Location Monitor resource\n");
1226                 return -EINVAL;
1227         }
1228
1229         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1230
1231         if (bridge->lm_attach == NULL) {
1232                 printk(KERN_ERR "vme_lm_attach not supported\n");
1233                 return -EINVAL;
1234         }
1235
1236         return bridge->lm_attach(lm, monitor, callback);
1237 }
1238 EXPORT_SYMBOL(vme_lm_attach);
1239
1240 int vme_lm_detach(struct vme_resource *resource, int monitor)
1241 {
1242         struct vme_bridge *bridge = find_bridge(resource);
1243         struct vme_lm_resource *lm;
1244
1245         if (resource->type != VME_LM) {
1246                 printk(KERN_ERR "Not a Location Monitor resource\n");
1247                 return -EINVAL;
1248         }
1249
1250         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1251
1252         if (bridge->lm_detach == NULL) {
1253                 printk(KERN_ERR "vme_lm_detach not supported\n");
1254                 return -EINVAL;
1255         }
1256
1257         return bridge->lm_detach(lm, monitor);
1258 }
1259 EXPORT_SYMBOL(vme_lm_detach);
1260
1261 void vme_lm_free(struct vme_resource *resource)
1262 {
1263         struct vme_lm_resource *lm;
1264
1265         if (resource->type != VME_LM) {
1266                 printk(KERN_ERR "Not a Location Monitor resource\n");
1267                 return;
1268         }
1269
1270         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1271
1272         mutex_lock(&(lm->mtx));
1273
1274         /* XXX
1275          * Check to see that there aren't any callbacks still attached, if
1276          * there are we should probably be detaching them!
1277          */
1278
1279         lm->locked = 0;
1280
1281         mutex_unlock(&(lm->mtx));
1282
1283         kfree(resource);
1284 }
1285 EXPORT_SYMBOL(vme_lm_free);
1286
1287 int vme_slot_get(struct device *bus)
1288 {
1289         struct vme_bridge *bridge;
1290
1291         bridge = dev_to_bridge(bus);
1292         if (bridge == NULL) {
1293                 printk(KERN_ERR "Can't find VME bus\n");
1294                 return -EINVAL;
1295         }
1296
1297         if (bridge->slot_get == NULL) {
1298                 printk(KERN_WARNING "vme_slot_get not supported\n");
1299                 return -EINVAL;
1300         }
1301
1302         return bridge->slot_get(bridge);
1303 }
1304 EXPORT_SYMBOL(vme_slot_get);
1305
1306
1307 /* - Bridge Registration --------------------------------------------------- */
1308
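/*
 * Allocate the lowest free bus number by scanning the vme_bus_numbers
 * bitmask. Note that if every bit is already taken the loop falls through
 * and the (out of range) value of i is returned unchecked.
 */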
1309 static int vme_alloc_bus_num(void)
1310 {
1311         int i;
1312
1313         mutex_lock(&vme_bus_num_mtx);
1314         for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1315                 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1316                         vme_bus_numbers |= (0x1 << i);
1317                         break;
1318                 }
1319         }
1320         mutex_unlock(&vme_bus_num_mtx);
1321
1322         return i;
1323 }
1324
1325 static void vme_free_bus_num(int bus)
1326 {
1327         mutex_lock(&vme_bus_num_mtx);
1328         vme_bus_numbers &= ~(0x1 << bus);
1329         mutex_unlock(&vme_bus_num_mtx);
1330 }
1331
1332 int vme_register_bridge(struct vme_bridge *bridge)
1333 {
1334         struct device *dev;
1335         int retval;
1336         int i;
1337
1338         bridge->num = vme_alloc_bus_num();
1339
1340         /* This creates 32 vme "slot" devices. This equates to a slot for each
1341          * ID available in a system conforming to the ANSI/VITA 1-1994
1342          * specification.
1343          */
1344         for (i = 0; i < VME_SLOTS_MAX; i++) {
1345                 dev = &(bridge->dev[i]);
1346                 memset(dev, 0, sizeof(struct device));
1347
1348                 dev->parent = bridge->parent;
1349                 dev->bus = &(vme_bus_type);
1350                 /*
1351                  * We save a pointer to the bridge in platform_data so that we
1352                  * can get to it later. We keep driver_data for use by the
1353                  * driver that binds against the slot
1354                  */
1355                 dev->platform_data = bridge;
1356                 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1357
1358                 retval = device_register(dev);
1359                 if (retval)
1360                         goto err_reg;
1361         }
1362
1363         return retval;
1364
1366 err_reg:
1367         while (--i >= 0) {
1368                 dev = &(bridge->dev[i]);
1369                 device_unregister(dev);
1370         }
1371         vme_free_bus_num(bridge->num);
1372         return retval;
1373 }
1374 EXPORT_SYMBOL(vme_register_bridge);
1375
1376 void vme_unregister_bridge(struct vme_bridge *bridge)
1377 {
1378         int i;
1379         struct device *dev;
1380
1381
1382         for (i = 0; i < VME_SLOTS_MAX; i++) {
1383                 dev = &(bridge->dev[i]);
1384                 device_unregister(dev);
1385         }
1386         vme_free_bus_num(bridge->num);
1387 }
1388 EXPORT_SYMBOL(vme_unregister_bridge);
1389
1390
1391 /* - Driver Registration --------------------------------------------------- */
1392
1393 int vme_register_driver(struct vme_driver *drv)
1394 {
1395         drv->driver.name = drv->name;
1396         drv->driver.bus = &vme_bus_type;
1397
1398         return driver_register(&drv->driver);
1399 }
1400 EXPORT_SYMBOL(vme_register_driver);
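/*
 * Illustrative sketch of a VME device driver registering with this framework.
 * The bind table ties the driver to bus/slot pairs and is terminated by an
 * all-zero entry, matching the walk in vme_bus_match() below; the entry type
 * is assumed to be struct vme_device_id as declared in vme.h, and
 * example_probe()/example_remove() are hypothetical driver callbacks:
 *
 *      static struct vme_device_id example_ids[] = {
 *              { .bus = 0, .slot = VME_SLOT_CURRENT },
 *              { },
 *      };
 *
 *      static struct vme_driver example_driver = {
 *              .name = "vme_example",
 *              .bind_table = example_ids,
 *              .probe = example_probe,
 *              .remove = example_remove,
 *      };
 *
 *      retval = vme_register_driver(&example_driver);
 */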
1401
1402 void vme_unregister_driver(struct vme_driver *drv)
1403 {
1404         driver_unregister(&drv->driver);
1405 }
1406 EXPORT_SYMBOL(vme_unregister_driver);
1407
1408 /* - Bus Registration ------------------------------------------------------ */
1409
1410 int vme_calc_slot(struct device *dev)
1411 {
1412         struct vme_bridge *bridge;
1413         int num;
1414
1415         bridge = dev_to_bridge(dev);
1416
1417         /* Determine slot number */
1418         num = 0;
1419         while (num < VME_SLOTS_MAX) {
1420                 if (&(bridge->dev[num]) == dev)
1421                         break;
1422
1423                 num++;
1424         }
1425         if (num == VME_SLOTS_MAX) {
1426                 dev_err(dev, "Failed to identify slot\n");
1427                 num = 0;
1428                 goto err_dev;
1429         }
1430         num++;
1431
1432 err_dev:
1433         return num;
1434 }
1435
1436 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1437 {
1438         if (dev->driver == NULL)
1439                 printk(KERN_ERR "dev->driver is NULL\n");
1440
1441         return container_of(dev->driver, struct vme_driver, driver);
1442 }
1443
1444 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1445 {
1446         struct vme_bridge *bridge;
1447         struct vme_driver *driver;
1448         int i, num;
1449
1450         bridge = dev_to_bridge(dev);
1451         driver = container_of(drv, struct vme_driver, driver);
1452
1453         num = vme_calc_slot(dev);
1454         if (!num)
1455                 goto err_dev;
1456
1457         if (driver->bind_table == NULL) {
1458                 dev_err(dev, "Bind table NULL\n");
1459                 goto err_table;
1460         }
1461
1462         i = 0;
1463         while ((driver->bind_table[i].bus != 0) ||
1464                 (driver->bind_table[i].slot != 0)) {
1465
1466                 if (bridge->num == driver->bind_table[i].bus) {
1467                         if (num == driver->bind_table[i].slot)
1468                                 return 1;
1469
1470                         if (driver->bind_table[i].slot == VME_SLOT_ALL)
1471                                 return 1;
1472
1473                         if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1474                                 (num == vme_slot_get(dev)))
1475                                 return 1;
1476                 }
1477                 i++;
1478         }
1479
1480 err_dev:
1481 err_table:
1482         return 0;
1483 }
1484
1485 static int vme_bus_probe(struct device *dev)
1486 {
1487         struct vme_bridge *bridge;
1488         struct vme_driver *driver;
1489         int retval = -ENODEV;
1490
1491         driver = dev_to_vme_driver(dev);
1492         bridge = dev_to_bridge(dev);
1493
1494         if (driver->probe != NULL)
1495                 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1496
1497         return retval;
1498 }
1499
1500 static int vme_bus_remove(struct device *dev)
1501 {
1502         struct vme_bridge *bridge;
1503         struct vme_driver *driver;
1504         int retval = -ENODEV;
1505
1506         driver = dev_to_vme_driver(dev);
1507         bridge = dev_to_bridge(dev);
1508
1509         if (driver->remove != NULL)
1510                 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1511
1512         return retval;
1513 }
1514
1515 struct bus_type vme_bus_type = {
1516         .name = "vme",
1517         .match = vme_bus_match,
1518         .probe = vme_bus_probe,
1519         .remove = vme_bus_remove,
1520 };
1521 EXPORT_SYMBOL(vme_bus_type);
1522
1523 static int __init vme_init(void)
1524 {
1525         return bus_register(&vme_bus_type);
1526 }
1527
1528 static void __exit vme_exit(void)
1529 {
1530         bus_unregister(&vme_bus_type);
1531 }
1532
1533 MODULE_DESCRIPTION("VME bridge driver framework");
1534 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
1535 MODULE_LICENSE("GPL");
1536
1537 module_init(vme_init);
1538 module_exit(vme_exit);