drivers/staging/vme/vme.c
1 /*
2  * VME Bridge Framework
3  *
4  * Author: Martyn Welch <martyn.welch@gefanuc.com>
5  * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32
33 #include "vme.h"
34 #include "vme_bridge.h"
35
36 /* Bitmask and mutex to keep track of bridge numbers */
37 static unsigned int vme_bus_numbers;
38 static DEFINE_MUTEX(vme_bus_num_mtx);
39
40 static void __exit vme_exit (void);
41 static int __init vme_init (void);
42
43
44 /*
45  * Find the bridge that a specific device is associated with
46  */
47 static struct vme_bridge *dev_to_bridge(struct device *dev)
48 {
49         return dev->platform_data;
50 }
51
52 /*
53  * Find the bridge that the resource is associated with.
54  */
55 static struct vme_bridge *find_bridge(struct vme_resource *resource)
56 {
57         /* Get list to search */
58         switch (resource->type) {
59         case VME_MASTER:
60                 return list_entry(resource->entry, struct vme_master_resource,
61                         list)->parent;
62                 break;
63         case VME_SLAVE:
64                 return list_entry(resource->entry, struct vme_slave_resource,
65                         list)->parent;
66                 break;
67         case VME_DMA:
68                 return list_entry(resource->entry, struct vme_dma_resource,
69                         list)->parent;
70                 break;
71         case VME_LM:
72                 return list_entry(resource->entry, struct vme_lm_resource,
73                         list)->parent;
74                 break;
75         default:
76                 printk(KERN_ERR "Unknown resource type\n");
77                 return NULL;
78                 break;
79         }
80 }
81
82 /*
83  * Allocate a contiguous block of memory for use by the driver. This is used to
84  * create the buffers for the slave windows.
85  *
86  * XXX VME bridges could be available on buses other than PCI. At the moment
87  *     this framework only supports PCI devices.
88  */
89 void * vme_alloc_consistent(struct vme_resource *resource, size_t size,
90         dma_addr_t *dma)
91 {
92         struct vme_bridge *bridge;
93         struct pci_dev *pdev;
94
95         if(resource == NULL) {
96                 printk("No resource\n");
97                 return NULL;
98         }
99
100         bridge = find_bridge(resource);
101         if(bridge == NULL) {
102                 printk("Can't find bridge\n");
103                 return NULL;
104         }
105
106         /* Find pci_dev container of dev */
107         if (bridge->parent == NULL) {
108                 printk("Dev entry NULL\n");
109                 return NULL;
110         }
111         pdev = container_of(bridge->parent, struct pci_dev, dev);
112
113         return pci_alloc_consistent(pdev, size, dma);
114 }
115 EXPORT_SYMBOL(vme_alloc_consistent);
116
117 /*
118  * Free previously allocated contiguous block of memory.
119  *
120  * XXX VME bridges could be available on buses other than PCI. At the moment
121  *     this framework only supports PCI devices.
122  */
123 void vme_free_consistent(struct vme_resource *resource, size_t size,
124         void *vaddr, dma_addr_t dma)
125 {
126         struct vme_bridge *bridge;
127         struct pci_dev *pdev;
128
129         if(resource == NULL) {
130                 printk("No resource\n");
131                 return;
132         }
133
134         bridge = find_bridge(resource);
135         if(bridge == NULL) {
136                 printk("Can't find bridge\n");
137                 return;
138         }
139
140         /* Find pci_dev container of dev */
141         pdev = container_of(bridge->parent, struct pci_dev, dev);
142
143         pci_free_consistent(pdev, size, vaddr, dma);
144 }
145 EXPORT_SYMBOL(vme_free_consistent);
146
147 size_t vme_get_size(struct vme_resource *resource)
148 {
149         int enabled, retval;
150         unsigned long long base, size;
151         dma_addr_t buf_base;
152         vme_address_t aspace;
153         vme_cycle_t cycle;
154         vme_width_t dwidth;
155
156         switch (resource->type) {
157         case VME_MASTER:
158                 retval = vme_master_get(resource, &enabled, &base, &size,
159                         &aspace, &cycle, &dwidth);
160                 if (retval)
161                         return 0;
162                 return size;
163         case VME_SLAVE:
164                 retval = vme_slave_get(resource, &enabled, &base, &size,
165                         &buf_base, &aspace, &cycle);
166                 if (retval)
167                         return 0;
168                 return size;
169         case VME_DMA:
170                 return 0;
171                 break;
172         default:
173                 printk(KERN_ERR "Unknown resource type\n");
174                 return 0;
175                 break;
176         }
177 }
178 EXPORT_SYMBOL(vme_get_size);
179
180 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
181         unsigned long long size)
182 {
183         int retval = 0;
184
185         switch (aspace) {
186         case VME_A16:
187                 if (((vme_base + size) > VME_A16_MAX) ||
188                                 (vme_base > VME_A16_MAX))
189                         retval = -EFAULT;
190                 break;
191         case VME_A24:
192                 if (((vme_base + size) > VME_A24_MAX) ||
193                                 (vme_base > VME_A24_MAX))
194                         retval = -EFAULT;
195                 break;
196         case VME_A32:
197                 if (((vme_base + size) > VME_A32_MAX) ||
198                                 (vme_base > VME_A32_MAX))
199                         retval = -EFAULT;
200                 break;
201         case VME_A64:
202                 /*
203                  * Any value held in an unsigned long long can be used as the
204                  * base
205                  */
206                 break;
207         case VME_CRCSR:
208                 if (((vme_base + size) > VME_CRCSR_MAX) ||
209                                 (vme_base > VME_CRCSR_MAX))
210                         retval = -EFAULT;
211                 break;
212         case VME_USER1:
213         case VME_USER2:
214         case VME_USER3:
215         case VME_USER4:
216                 /* User Defined */
217                 break;
218         default:
219                 printk("Invalid address space\n");
220                 retval = -EINVAL;
221                 break;
222         }
223
224         return retval;
225 }
226
227 /*
228  * Request a slave image with specific attributes, return some unique
229  * identifier.
230  */
231 struct vme_resource * vme_slave_request(struct device *dev,
232         vme_address_t address, vme_cycle_t cycle)
233 {
234         struct vme_bridge *bridge;
235         struct list_head *slave_pos = NULL;
236         struct vme_slave_resource *allocated_image = NULL;
237         struct vme_slave_resource *slave_image = NULL;
238         struct vme_resource *resource = NULL;
239
240         bridge = dev_to_bridge(dev);
241         if (bridge == NULL) {
242                 printk(KERN_ERR "Can't find VME bus\n");
243                 goto err_bus;
244         }
245
246         /* Loop through slave resources */
247         list_for_each(slave_pos, &(bridge->slave_resources)) {
248                 slave_image = list_entry(slave_pos,
249                         struct vme_slave_resource, list);
250
251                 if (slave_image == NULL) {
252                         printk("Registered NULL Slave resource\n");
253                         continue;
254                 }
255
256                 /* Find an unlocked and compatible image */
257                 mutex_lock(&(slave_image->mtx));
258                 if(((slave_image->address_attr & address) == address) &&
259                         ((slave_image->cycle_attr & cycle) == cycle) &&
260                         (slave_image->locked == 0)) {
261
262                         slave_image->locked = 1;
263                         mutex_unlock(&(slave_image->mtx));
264                         allocated_image = slave_image;
265                         break;
266                 }
267                 mutex_unlock(&(slave_image->mtx));
268         }
269
270         /* No free image */
271         if (allocated_image == NULL)
272                 goto err_image;
273
274         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
275         if (resource == NULL) {
276                 printk(KERN_WARNING "Unable to allocate resource structure\n");
277                 goto err_alloc;
278         }
279         resource->type = VME_SLAVE;
280         resource->entry = &(allocated_image->list);
281
282         return resource;
283
284 err_alloc:
285         /* Unlock image */
286         mutex_lock(&(slave_image->mtx));
287         slave_image->locked = 0;
288         mutex_unlock(&(slave_image->mtx));
289 err_image:
290 err_bus:
291         return NULL;
292 }
293 EXPORT_SYMBOL(vme_slave_request);
294
295 int vme_slave_set (struct vme_resource *resource, int enabled,
296         unsigned long long vme_base, unsigned long long size,
297         dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
298 {
299         struct vme_bridge *bridge = find_bridge(resource);
300         struct vme_slave_resource *image;
301         int retval;
302
303         if (resource->type != VME_SLAVE) {
304                 printk("Not a slave resource\n");
305                 return -EINVAL;
306         }
307
308         image = list_entry(resource->entry, struct vme_slave_resource, list);
309
310         if (bridge->slave_set == NULL) {
311                 printk("Function not supported\n");
312                 return -ENOSYS;
313         }
314
315         if(!(((image->address_attr & aspace) == aspace) &&
316                 ((image->cycle_attr & cycle) == cycle))) {
317                 printk("Invalid attributes\n");
318                 return -EINVAL;
319         }
320
321         retval = vme_check_window(aspace, vme_base, size);
322         if(retval)
323                 return retval;
324
325         return bridge->slave_set(image, enabled, vme_base, size, buf_base,
326                 aspace, cycle);
327 }
328 EXPORT_SYMBOL(vme_slave_set);
329
330 int vme_slave_get (struct vme_resource *resource, int *enabled,
331         unsigned long long *vme_base, unsigned long long *size,
332         dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
333 {
334         struct vme_bridge *bridge = find_bridge(resource);
335         struct vme_slave_resource *image;
336
337         if (resource->type != VME_SLAVE) {
338                 printk("Not a slave resource\n");
339                 return -EINVAL;
340         }
341
342         image = list_entry(resource->entry, struct vme_slave_resource, list);
343
344         if (bridge->slave_get == NULL) {
345                 printk("vme_slave_get not supported\n");
346                 return -EINVAL;
347         }
348
349         return bridge->slave_get(image, enabled, vme_base, size, buf_base,
350                 aspace, cycle);
351 }
352 EXPORT_SYMBOL(vme_slave_get);
353
354 void vme_slave_free(struct vme_resource *resource)
355 {
356         struct vme_slave_resource *slave_image;
357
358         if (resource->type != VME_SLAVE) {
359                 printk("Not a slave resource\n");
360                 return;
361         }
362
363         slave_image = list_entry(resource->entry, struct vme_slave_resource,
364                 list);
365         if (slave_image == NULL) {
366                 printk("Can't find slave resource\n");
367                 return;
368         }
369
370         /* Unlock image */
371         mutex_lock(&(slave_image->mtx));
372         if (slave_image->locked == 0)
373                 printk(KERN_ERR "Image is already free\n");
374
375         slave_image->locked = 0;
376         mutex_unlock(&(slave_image->mtx));
377
378         /* Free up resource memory */
379         kfree(resource);
380 }
381 EXPORT_SYMBOL(vme_slave_free);
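
/*
 * Illustrative sketch, not part of the framework: how a device driver
 * might create a 64KB A24 slave window backed by a coherent buffer. The
 * function name, VME base address and window size are hypothetical; the
 * attribute flags (VME_A24, VME_SCT, VME_USER, VME_DATA) are assumed to
 * be provided by vme.h, as used elsewhere in this framework.
 */
static int example_setup_slave_window(struct device *dev)
{
        struct vme_resource *res;
        dma_addr_t buf_dma;
        void *buf;
        int retval;

        res = vme_slave_request(dev, VME_A24, VME_SCT | VME_USER | VME_DATA);
        if (res == NULL)
                return -ENOMEM;

        /* Back the window with a 64KB buffer suitable for bus mastering */
        buf = vme_alloc_consistent(res, 0x10000, &buf_dma);
        if (buf == NULL) {
                vme_slave_free(res);
                return -ENOMEM;
        }

        /* Map VME A24 address 0x200000 onto the buffer and enable it */
        retval = vme_slave_set(res, 1, 0x200000, 0x10000, buf_dma, VME_A24,
                VME_SCT | VME_USER | VME_DATA);
        if (retval) {
                vme_free_consistent(res, 0x10000, buf, buf_dma);
                vme_slave_free(res);
                return retval;
        }

        /* A real driver would keep res/buf and release them on remove */
        return 0;
}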
382
383 /*
384  * Request a master image with specific attributes, return some unique
385  * identifier.
386  */
387 struct vme_resource * vme_master_request(struct device *dev,
388         vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
389 {
390         struct vme_bridge *bridge;
391         struct list_head *master_pos = NULL;
392         struct vme_master_resource *allocated_image = NULL;
393         struct vme_master_resource *master_image = NULL;
394         struct vme_resource *resource = NULL;
395
396         bridge = dev_to_bridge(dev);
397         if (bridge == NULL) {
398                 printk(KERN_ERR "Can't find VME bus\n");
399                 goto err_bus;
400         }
401
402         /* Loop through master resources */
403         list_for_each(master_pos, &(bridge->master_resources)) {
404                 master_image = list_entry(master_pos,
405                         struct vme_master_resource, list);
406
407                 if (master_image == NULL) {
408                         printk(KERN_WARNING "Registered NULL master resource\n");
409                         continue;
410                 }
411
412                 /* Find an unlocked and compatible image */
413                 spin_lock(&(master_image->lock));
414                 if(((master_image->address_attr & address) == address) &&
415                         ((master_image->cycle_attr & cycle) == cycle) &&
416                         ((master_image->width_attr & dwidth) == dwidth) &&
417                         (master_image->locked == 0)) {
418
419                         master_image->locked = 1;
420                         spin_unlock(&(master_image->lock));
421                         allocated_image = master_image;
422                         break;
423                 }
424                 spin_unlock(&(master_image->lock));
425         }
426
427         /* Check to see if we found a resource */
428         if (allocated_image == NULL) {
429                 printk(KERN_ERR "Can't find a suitable resource\n");
430                 goto err_image;
431         }
432
433         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
434         if (resource == NULL) {
435                 printk(KERN_ERR "Unable to allocate resource structure\n");
436                 goto err_alloc;
437         }
438         resource->type = VME_MASTER;
439         resource->entry = &(allocated_image->list);
440
441         return resource;
442
443         kfree(resource);
444 err_alloc:
445         /* Unlock image */
446         spin_lock(&(master_image->lock));
447         master_image->locked = 0;
448         spin_unlock(&(master_image->lock));
449 err_image:
450 err_bus:
451         return NULL;
452 }
453 EXPORT_SYMBOL(vme_master_request);
454
455 int vme_master_set (struct vme_resource *resource, int enabled,
456         unsigned long long vme_base, unsigned long long size,
457         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
458 {
459         struct vme_bridge *bridge = find_bridge(resource);
460         struct vme_master_resource *image;
461         int retval;
462
463         if (resource->type != VME_MASTER) {
464                 printk("Not a master resource\n");
465                 return -EINVAL;
466         }
467
468         image = list_entry(resource->entry, struct vme_master_resource, list);
469
470         if (bridge->master_set == NULL) {
471                 printk("vme_master_set not supported\n");
472                 return -EINVAL;
473         }
474
475         if(!(((image->address_attr & aspace) == aspace) &&
476                 ((image->cycle_attr & cycle) == cycle) &&
477                 ((image->width_attr & dwidth) == dwidth))) {
478                 printk("Invalid attributes\n");
479                 return -EINVAL;
480         }
481
482         retval = vme_check_window(aspace, vme_base, size);
483         if(retval)
484                 return retval;
485
486         return bridge->master_set(image, enabled, vme_base, size, aspace,
487                 cycle, dwidth);
488 }
489 EXPORT_SYMBOL(vme_master_set);
490
491 int vme_master_get (struct vme_resource *resource, int *enabled,
492         unsigned long long *vme_base, unsigned long long *size,
493         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
494 {
495         struct vme_bridge *bridge = find_bridge(resource);
496         struct vme_master_resource *image;
497
498         if (resource->type != VME_MASTER) {
499                 printk("Not a master resource\n");
500                 return -EINVAL;
501         }
502
503         image = list_entry(resource->entry, struct vme_master_resource, list);
504
505         if (bridge->master_get == NULL) {
506                 printk("vme_master_get not supported\n");
507                 return -EINVAL;
508         }
509
510         return bridge->master_get(image, enabled, vme_base, size, aspace,
511                 cycle, dwidth);
512 }
513 EXPORT_SYMBOL(vme_master_get);
514
515 /*
516  * Read data out of VME space into a buffer.
517  */
518 ssize_t vme_master_read (struct vme_resource *resource, void *buf, size_t count,
519         loff_t offset)
520 {
521         struct vme_bridge *bridge = find_bridge(resource);
522         struct vme_master_resource *image;
523         size_t length;
524
525         if (bridge->master_read == NULL) {
526                 printk("Reading from resource not supported\n");
527                 return -EINVAL;
528         }
529
530         if (resource->type != VME_MASTER) {
531                 printk("Not a master resource\n");
532                 return -EINVAL;
533         }
534
535         image = list_entry(resource->entry, struct vme_master_resource, list);
536
537         length = vme_get_size(resource);
538
539         if (offset > length) {
540                 printk("Invalid Offset\n");
541                 return -EFAULT;
542         }
543
544         if ((offset + count) > length)
545                 count = length - offset;
546
547         return bridge->master_read(image, buf, count, offset);
548
549 }
550 EXPORT_SYMBOL(vme_master_read);
551
552 /*
553  * Write data out to VME space from a buffer.
554  */
555 ssize_t vme_master_write (struct vme_resource *resource, void *buf,
556         size_t count, loff_t offset)
557 {
558         struct vme_bridge *bridge = find_bridge(resource);
559         struct vme_master_resource *image;
560         size_t length;
561
562         if (bridge->master_write == NULL) {
563                 printk("Writing to resource not supported\n");
564                 return -EINVAL;
565         }
566
567         if (resource->type != VME_MASTER) {
568                 printk("Not a master resource\n");
569                 return -EINVAL;
570         }
571
572         image = list_entry(resource->entry, struct vme_master_resource, list);
573
574         length = vme_get_size(resource);
575
576         if (offset > length) {
577                 printk("Invalid Offset\n");
578                 return -EFAULT;
579         }
580
581         if ((offset + count) > length)
582                 count = length - offset;
583
584         return bridge->master_write(image, buf, count, offset);
585 }
586 EXPORT_SYMBOL(vme_master_write);
587
588 /*
589  * Perform RMW cycle to provided location.
590  */
591 unsigned int vme_master_rmw (struct vme_resource *resource, unsigned int mask,
592         unsigned int compare, unsigned int swap, loff_t offset)
593 {
594         struct vme_bridge *bridge = find_bridge(resource);
595         struct vme_master_resource *image;
596
597         if (bridge->master_rmw == NULL) {
598                 printk("RMW to resource not supported\n");
599                 return -EINVAL;
600         }
601
602         if (resource->type != VME_MASTER) {
603                 printk("Not a master resource\n");
604                 return -EINVAL;
605         }
606
607         image = list_entry(resource->entry, struct vme_master_resource, list);
608
609         return bridge->master_rmw(image, mask, compare, swap, offset);
610 }
611 EXPORT_SYMBOL(vme_master_rmw);
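
/*
 * Illustrative sketch, not part of the framework: using the RMW cycle on an
 * already configured master window as an atomic test-and-set. The offset and
 * bit values are hypothetical, and the meaning of the returned value is
 * bridge-specific; this sketch assumes the bridge returns the value read
 * back before the swap took place.
 */
static int example_take_hw_lock(struct vme_resource *master_res)
{
        unsigned int old;

        /*
         * The bridge reads the location at offset 0x10, compares the bits
         * selected by the mask (0xff) with the compare value (0x0) and, if
         * they match, writes the swap value (0x1) back in one bus cycle.
         */
        old = vme_master_rmw(master_res, 0xff, 0x0, 0x1, 0x10);

        return ((old & 0xff) == 0) ? 0 : -EBUSY;
}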
612
613 void vme_master_free(struct vme_resource *resource)
614 {
615         struct vme_master_resource *master_image;
616
617         if (resource->type != VME_MASTER) {
618                 printk("Not a master resource\n");
619                 return;
620         }
621
622         master_image = list_entry(resource->entry, struct vme_master_resource,
623                 list);
624         if (master_image == NULL) {
625                 printk("Can't find master resource\n");
626                 return;
627         }
628
629         /* Unlock image */
630         spin_lock(&(master_image->lock));
631         if (master_image->locked == 0)
632                 printk(KERN_ERR "Image is already free\n");
633
634         master_image->locked = 0;
635         spin_unlock(&(master_image->lock));
636
637         /* Free up resource memory */
638         kfree(resource);
639 }
640 EXPORT_SYMBOL(vme_master_free);
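
/*
 * Illustrative sketch, not part of the framework: requesting an A32/D32
 * master window, pointing it at a hypothetical board's memory and reading
 * from it. The VME base address and window size are made up for the
 * example; the attribute flags are assumed to come from vme.h.
 */
static ssize_t example_read_master_window(struct device *dev, void *buf,
        size_t count)
{
        struct vme_resource *res;
        ssize_t retval;

        res = vme_master_request(dev, VME_A32, VME_SCT | VME_USER | VME_DATA,
                VME_D32);
        if (res == NULL)
                return -ENOMEM;

        /* Enable a 1MB window onto VME A32 address 0x80000000 */
        retval = vme_master_set(res, 1, 0x80000000, 0x100000, VME_A32,
                VME_SCT | VME_USER | VME_DATA, VME_D32);
        if (retval)
                goto out;

        /* Read count bytes from the start of the window */
        retval = vme_master_read(res, buf, count, 0);

out:
        vme_master_free(res);
        return retval;
}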
641
642 /*
643  * Request a DMA controller with specific attributes, return some unique
644  * identifier.
645  */
646 struct vme_resource *vme_dma_request(struct device *dev)
647 {
648         struct vme_bridge *bridge;
649         struct list_head *dma_pos = NULL;
650         struct vme_dma_resource *allocated_ctrlr = NULL;
651         struct vme_dma_resource *dma_ctrlr = NULL;
652         struct vme_resource *resource = NULL;
653
654         /* XXX Not checking resource attributes */
655         printk(KERN_ERR "No VME resource Attribute tests done\n");
656
657         bridge = dev_to_bridge(dev);
658         if (bridge == NULL) {
659                 printk(KERN_ERR "Can't find VME bus\n");
660                 goto err_bus;
661         }
662
663         /* Loop through DMA resources */
664         list_for_each(dma_pos, &(bridge->dma_resources)) {
665                 dma_ctrlr = list_entry(dma_pos,
666                         struct vme_dma_resource, list);
667
668                 if (dma_ctrlr == NULL) {
669                         printk("Registered NULL DMA resource\n");
670                         continue;
671                 }
672
673                 /* Find an unlocked controller */
674                 mutex_lock(&(dma_ctrlr->mtx));
675                 if(dma_ctrlr->locked == 0) {
676                         dma_ctrlr->locked = 1;
677                         mutex_unlock(&(dma_ctrlr->mtx));
678                         allocated_ctrlr = dma_ctrlr;
679                         break;
680                 }
681                 mutex_unlock(&(dma_ctrlr->mtx));
682         }
683
684         /* Check to see if we found a resource */
685         if (allocated_ctrlr == NULL)
686                 goto err_ctrlr;
687
688         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
689         if (resource == NULL) {
690                 printk(KERN_WARNING "Unable to allocate resource structure\n");
691                 goto err_alloc;
692         }
693         resource->type = VME_DMA;
694         resource->entry = &(allocated_ctrlr->list);
695
696         return resource;
697
698 err_alloc:
699         /* Unlock image */
700         mutex_lock(&(dma_ctrlr->mtx));
701         dma_ctrlr->locked = 0;
702         mutex_unlock(&(dma_ctrlr->mtx));
703 err_ctrlr:
704 err_bus:
705         return NULL;
706 }
707 EXPORT_SYMBOL(vme_dma_request);
708
709 /*
710  * Start new list
711  */
712 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
713 {
714         struct vme_dma_resource *ctrlr;
715         struct vme_dma_list *dma_list;
716
717         if (resource->type != VME_DMA) {
718                 printk("Not a DMA resource\n");
719                 return NULL;
720         }
721
722         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
723
724         dma_list = (struct vme_dma_list *)kmalloc(
725                 sizeof(struct vme_dma_list), GFP_KERNEL);
726         if(dma_list == NULL) {
727                 printk("Unable to allocate memory for new dma list\n");
728                 return NULL;
729         }
730         INIT_LIST_HEAD(&(dma_list->entries));
731         dma_list->parent = ctrlr;
732         mutex_init(&(dma_list->mtx));
733
734         return dma_list;
735 }
736 EXPORT_SYMBOL(vme_new_dma_list);
737
738 /*
739  * Create "Pattern" type attributes
740  */
741 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
742         vme_pattern_t type)
743 {
744         struct vme_dma_attr *attributes;
745         struct vme_dma_pattern *pattern_attr;
746
747         attributes = (struct vme_dma_attr *)kmalloc(
748                 sizeof(struct vme_dma_attr), GFP_KERNEL);
749         if(attributes == NULL) {
750                 printk("Unable to allocate memory for attributes structure\n");
751                 goto err_attr;
752         }
753
754         pattern_attr = (struct vme_dma_pattern *)kmalloc(
755                 sizeof(struct vme_dma_pattern), GFP_KERNEL);
756         if(pattern_attr == NULL) {
757                 printk("Unable to allocate memory for pattern attributes\n");
758                 goto err_pat;
759         }
760
761         attributes->type = VME_DMA_PATTERN;
762         attributes->private = (void *)pattern_attr;
763
764         pattern_attr->pattern = pattern;
765         pattern_attr->type = type;
766
767         return attributes;
768
769         kfree(pattern_attr);
770 err_pat:
771         kfree(attributes);
772 err_attr:
773         return NULL;
774 }
775 EXPORT_SYMBOL(vme_dma_pattern_attribute);
776
777 /*
778  * Create "PCI" type attributes
779  */
780 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
781 {
782         struct vme_dma_attr *attributes;
783         struct vme_dma_pci *pci_attr;
784
785         /* XXX Run some sanity checks here */
786
787         attributes = (struct vme_dma_attr *)kmalloc(
788                 sizeof(struct vme_dma_attr), GFP_KERNEL);
789         if(attributes == NULL) {
790                 printk("Unable to allocate memory for attributes structure\n");
791                 goto err_attr;
792         }
793
794         pci_attr = (struct vme_dma_pci *)kmalloc(sizeof(struct vme_dma_pci),
795                 GFP_KERNEL);
796         if(pci_attr == NULL) {
797                 printk("Unable to allocate memory for pci attributes\n");
798                 goto err_pci;
799         }
800
801
802
803         attributes->type = VME_DMA_PCI;
804         attributes->private = (void *)pci_attr;
805
806         pci_attr->address = address;
807
808         return attributes;
809
810         kfree(pci_attr);
811 err_pci:
812         kfree(attributes);
813 err_attr:
814         return NULL;
815 }
816 EXPORT_SYMBOL(vme_dma_pci_attribute);
817
818 /*
819  * Create "VME" type attributes
820  */
821 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
822         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
823 {
824         struct vme_dma_attr *attributes;
825         struct vme_dma_vme *vme_attr;
826
827         /* XXX Run some sanity checks here */
828
829         attributes = (struct vme_dma_attr *)kmalloc(
830                 sizeof(struct vme_dma_attr), GFP_KERNEL);
831         if(attributes == NULL) {
832                 printk("Unable to allocate memory for attributes structure\n");
833                 goto err_attr;
834         }
835
836         vme_attr = (struct vme_dma_vme *)kmalloc(sizeof(struct vme_dma_vme),
837                 GFP_KERNEL);
838         if(vme_attr == NULL) {
839                 printk("Unable to allocate memory for vme attributes\n");
840                 goto err_vme;
841         }
842
843         attributes->type = VME_DMA_VME;
844         attributes->private = (void *)vme_attr;
845
846         vme_attr->address = address;
847         vme_attr->aspace = aspace;
848         vme_attr->cycle = cycle;
849         vme_attr->dwidth = dwidth;
850
851         return attributes;
852
853         kfree(vme_attr);
854 err_vme:
855         kfree(attributes);
856 err_attr:
857         return NULL;
858 }
859 EXPORT_SYMBOL(vme_dma_vme_attribute);
860
861 /*
862  * Free attribute
863  */
864 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
865 {
866         kfree(attributes->private);
867         kfree(attributes);
868 }
869 EXPORT_SYMBOL(vme_dma_free_attribute);
870
871 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
872         struct vme_dma_attr *dest, size_t count)
873 {
874         struct vme_bridge *bridge = list->parent->parent;
875         int retval;
876
877         if (bridge->dma_list_add == NULL) {
878                 printk("Link List DMA generation not supported\n");
879                 return -EINVAL;
880         }
881
882         if (!mutex_trylock(&(list->mtx))) {
883                 printk("Link List already submitted\n");
884                 return -EINVAL;
885         }
886
887         retval = bridge->dma_list_add(list, src, dest, count);
888
889         mutex_unlock(&(list->mtx));
890
891         return retval;
892 }
893 EXPORT_SYMBOL(vme_dma_list_add);
894
895 int vme_dma_list_exec(struct vme_dma_list *list)
896 {
897         struct vme_bridge *bridge = list->parent->parent;
898         int retval;
899
900         if (bridge->dma_list_exec == NULL) {
901                 printk("Link List DMA execution not supported\n");
902                 return -EINVAL;
903         }
904
905         mutex_lock(&(list->mtx));
906
907         retval = bridge->dma_list_exec(list);
908
909         mutex_unlock(&(list->mtx));
910
911         return retval;
912 }
913 EXPORT_SYMBOL(vme_dma_list_exec);
914
915 int vme_dma_list_free(struct vme_dma_list *list)
916 {
917         struct vme_bridge *bridge = list->parent->parent;
918         int retval;
919
920         if (bridge->dma_list_empty == NULL) {
921                 printk("Emptying of Link Lists not supported\n");
922                 return -EINVAL;
923         }
924
925         if (!mutex_trylock(&(list->mtx))) {
926                 printk("Link List in use\n");
927                 return -EINVAL;
928         }
929
930         /*
931          * Empty out all of the entries from the dma list. We need to go to the
932          * low level driver as dma entries are driver specific.
933          */
934         retval = bridge->dma_list_empty(list);
935         if (retval) {
936                 printk("Unable to empty link-list entries\n");
937                 mutex_unlock(&(list->mtx));
938                 return retval;
939         }
940         mutex_unlock(&(list->mtx));
941         kfree(list);
942
943         return retval;
944 }
945 EXPORT_SYMBOL(vme_dma_list_free);
946
947 int vme_dma_free(struct vme_resource *resource)
948 {
949         struct vme_dma_resource *ctrlr;
950
951         if (resource->type != VME_DMA) {
952                 printk("Not a DMA resource\n");
953                 return -EINVAL;
954         }
955
956         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
957
958         if (!mutex_trylock(&(ctrlr->mtx))) {
959                 printk("Resource busy, can't free\n");
960                 return -EBUSY;
961         }
962
963         if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
964                 printk("Resource still processing transfers\n");
965                 mutex_unlock(&(ctrlr->mtx));
966                 return -EBUSY;
967         }
968
969         ctrlr->locked = 0;
970
971         mutex_unlock(&(ctrlr->mtx));
972
973         return 0;
974 }
975 EXPORT_SYMBOL(vme_dma_free);
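
/*
 * Illustrative sketch, not part of the framework: building and running a
 * single-entry DMA list that copies 4KB from VME A32 space into a PCI
 * buffer. The source address is hypothetical, the attribute constants are
 * assumed to come from vme.h, and the sketch assumes the attributes may be
 * freed once the entry has been added to the list.
 */
static int example_dma_vme_to_pci(struct device *dev, dma_addr_t pci_buf)
{
        struct vme_resource *res;
        struct vme_dma_list *list;
        struct vme_dma_attr *src = NULL, *dest = NULL;
        int retval;

        res = vme_dma_request(dev);
        if (res == NULL)
                return -ENOMEM;

        list = vme_new_dma_list(res);
        if (list == NULL) {
                retval = -ENOMEM;
                goto free_ctrlr;
        }

        /* Source: VME A32 address 0x80000000, single cycle, D32 transfers */
        src = vme_dma_vme_attribute(0x80000000, VME_A32, VME_SCT, VME_D32);
        /* Destination: a PCI bus address the caller has already mapped */
        dest = vme_dma_pci_attribute(pci_buf);
        if ((src == NULL) || (dest == NULL)) {
                retval = -ENOMEM;
                goto free_attr;
        }

        retval = vme_dma_list_add(list, src, dest, 0x1000);
        if (retval)
                goto free_attr;

        retval = vme_dma_list_exec(list);

free_attr:
        if (dest)
                vme_dma_free_attribute(dest);
        if (src)
                vme_dma_free_attribute(src);
        vme_dma_list_free(list);
free_ctrlr:
        vme_dma_free(res);
        return retval;
}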
976
977 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
978 {
979         void (*call)(int, int, void *);
980         void *priv_data;
981
982         call = bridge->irq[level - 1].callback[statid].func;
983         priv_data = bridge->irq[level - 1].callback[statid].priv_data;
984
985         if (call != NULL)
986                 call(level, statid, priv_data);
987         else
988                 printk(KERN_WARNING "Spurious VME interrupt, level:%x, "
989                         "vector:%x\n", level, statid);
990 }
991 EXPORT_SYMBOL(vme_irq_handler);
992
993 int vme_irq_request(struct device *dev, int level, int statid,
994         void (*callback)(int level, int vector, void *priv_data),
995         void *priv_data)
996 {
997         struct vme_bridge *bridge;
998
999         bridge = dev_to_bridge(dev);
1000         if (bridge == NULL) {
1001                 printk(KERN_ERR "Can't find VME bus\n");
1002                 return -EINVAL;
1003         }
1004
1005         if((level < 1) || (level > 7)) {
1006                 printk(KERN_ERR "Invalid interrupt level\n");
1007                 return -EINVAL;
1008         }
1009
1010         if (bridge->irq_set == NULL) {
1011                 printk(KERN_ERR "Configuring interrupts not supported\n");
1012                 return -EINVAL;
1013         }
1014
1015         mutex_lock(&(bridge->irq_mtx));
1016
1017         if (bridge->irq[level - 1].callback[statid].func) {
1018                 mutex_unlock(&(bridge->irq_mtx));
1019                 printk(KERN_WARNING "VME Interrupt already taken\n");
1020                 return -EBUSY;
1021         }
1022
1023         bridge->irq[level - 1].count++;
1024         bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1025         bridge->irq[level - 1].callback[statid].func = callback;
1026
1027         /* Enable IRQ level */
1028         bridge->irq_set(level, 1, 1);
1029
1030         mutex_unlock(&(bridge->irq_mtx));
1031
1032         return 0;
1033 }
1034 EXPORT_SYMBOL(vme_irq_request);
1035
1036 void vme_irq_free(struct device *dev, int level, int statid)
1037 {
1038         struct vme_bridge *bridge;
1039
1040         bridge = dev_to_bridge(dev);
1041         if (bridge == NULL) {
1042                 printk(KERN_ERR "Can't find VME bus\n");
1043                 return;
1044         }
1045
1046         if((level < 1) || (level > 7)) {
1047                 printk(KERN_ERR "Invalid interrupt level\n");
1048                 return;
1049         }
1050
1051         if (bridge->irq_set == NULL) {
1052                 printk(KERN_ERR "Configuring interrupts not supported\n");
1053                 return;
1054         }
1055
1056         mutex_lock(&(bridge->irq_mtx));
1057
1058         bridge->irq[level - 1].count--;
1059
1060         /* Disable IRQ level if no more interrupts attached at this level */
1061         if (bridge->irq[level - 1].count == 0)
1062                 bridge->irq_set(level, 0, 1);
1063
1064         bridge->irq[level - 1].callback[statid].func = NULL;
1065         bridge->irq[level - 1].callback[statid].priv_data = NULL;
1066
1067         mutex_unlock(&(bridge->irq_mtx));
1068 }
1069 EXPORT_SYMBOL(vme_irq_free);
1070
1071 int vme_irq_generate(struct device *dev, int level, int statid)
1072 {
1073         struct vme_bridge *bridge;
1074
1075         bridge = dev_to_bridge(dev);
1076         if (bridge == NULL) {
1077                 printk(KERN_ERR "Can't find VME bus\n");
1078                 return -EINVAL;
1079         }
1080
1081         if((level < 1) || (level > 7)) {
1082                 printk(KERN_WARNING "Invalid interrupt level\n");
1083                 return -EINVAL;
1084         }
1085
1086         if (bridge->irq_generate == NULL) {
1087                 printk("Interrupt generation not supported\n");
1088                 return -EINVAL;
1089         }
1090
1091         return bridge->irq_generate(level, statid);
1092 }
1093 EXPORT_SYMBOL(vme_irq_generate);
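
/*
 * Illustrative sketch, not part of the framework: attaching a handler to
 * VME interrupt level 3, status/ID 0x20, and releasing it again. The
 * level, status/ID and handler are hypothetical.
 */
static void example_vme_isr(int level, int statid, void *priv_data)
{
        struct device *dev = priv_data;

        dev_info(dev, "VME interrupt: level %d, vector 0x%x\n", level, statid);
}

static int example_setup_irq(struct device *dev)
{
        int retval;

        retval = vme_irq_request(dev, 3, 0x20, example_vme_isr, dev);
        if (retval)
                return retval;

        /* ... device operation; the callback runs from vme_irq_handler() ... */

        vme_irq_free(dev, 3, 0x20);
        return 0;
}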
1094
1095 /*
1096  * Request the location monitor, return resource or NULL
1097  */
1098 struct vme_resource *vme_lm_request(struct device *dev)
1099 {
1100         struct vme_bridge *bridge;
1101         struct list_head *lm_pos = NULL;
1102         struct vme_lm_resource *allocated_lm = NULL;
1103         struct vme_lm_resource *lm = NULL;
1104         struct vme_resource *resource = NULL;
1105
1106         bridge = dev_to_bridge(dev);
1107         if (bridge == NULL) {
1108                 printk(KERN_ERR "Can't find VME bus\n");
1109                 goto err_bus;
1110         }
1111
1112         /* Loop through location monitor resources */
1113         list_for_each(lm_pos, &(bridge->lm_resources)) {
1114                 lm = list_entry(lm_pos,
1115                         struct vme_lm_resource, list);
1116
1117                 if (lm == NULL) {
1118                         printk(KERN_ERR "Registered NULL Location Monitor "
1119                                 "resource\n");
1120                         continue;
1121                 }
1122
1123                 /* Find an unlocked controller */
1124                 mutex_lock(&(lm->mtx));
1125                 if (lm->locked == 0) {
1126                         lm->locked = 1;
1127                         mutex_unlock(&(lm->mtx));
1128                         allocated_lm = lm;
1129                         break;
1130                 }
1131                 mutex_unlock(&(lm->mtx));
1132         }
1133
1134         /* Check to see if we found a resource */
1135         if (allocated_lm == NULL)
1136                 goto err_lm;
1137
1138         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1139         if (resource == NULL) {
1140                 printk(KERN_ERR "Unable to allocate resource structure\n");
1141                 goto err_alloc;
1142         }
1143         resource->type = VME_LM;
1144         resource->entry = &(allocated_lm->list);
1145
1146         return resource;
1147
1148 err_alloc:
1149         /* Unlock image */
1150         mutex_lock(&(lm->mtx));
1151         lm->locked = 0;
1152         mutex_unlock(&(lm->mtx));
1153 err_lm:
1154 err_bus:
1155         return NULL;
1156 }
1157 EXPORT_SYMBOL(vme_lm_request);
1158
1159 int vme_lm_count(struct vme_resource *resource)
1160 {
1161         struct vme_lm_resource *lm;
1162
1163         if (resource->type != VME_LM) {
1164                 printk(KERN_ERR "Not a Location Monitor resource\n");
1165                 return -EINVAL;
1166         }
1167
1168         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1169
1170         return lm->monitors;
1171 }
1172 EXPORT_SYMBOL(vme_lm_count);
1173
1174 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1175         vme_address_t aspace, vme_cycle_t cycle)
1176 {
1177         struct vme_bridge *bridge = find_bridge(resource);
1178         struct vme_lm_resource *lm;
1179
1180         if (resource->type != VME_LM) {
1181                 printk(KERN_ERR "Not a Location Monitor resource\n");
1182                 return -EINVAL;
1183         }
1184
1185         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1186
1187         if (bridge->lm_set == NULL) {
1188                 printk(KERN_ERR "vme_lm_set not supported\n");
1189                 return -EINVAL;
1190         }
1191
1192         /* XXX Check parameters */
1193
1194         return bridge->lm_set(lm, lm_base, aspace, cycle);
1195 }
1196 EXPORT_SYMBOL(vme_lm_set);
1197
1198 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1199         vme_address_t *aspace, vme_cycle_t *cycle)
1200 {
1201         struct vme_bridge *bridge = find_bridge(resource);
1202         struct vme_lm_resource *lm;
1203
1204         if (resource->type != VME_LM) {
1205                 printk(KERN_ERR "Not a Location Monitor resource\n");
1206                 return -EINVAL;
1207         }
1208
1209         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1210
1211         if (bridge->lm_get == NULL) {
1212                 printk(KERN_ERR "vme_lm_get not supported\n");
1213                 return -EINVAL;
1214         }
1215
1216         return bridge->lm_get(lm, lm_base, aspace, cycle);
1217 }
1218 EXPORT_SYMBOL(vme_lm_get);
1219
1220 int vme_lm_attach(struct vme_resource *resource, int monitor,
1221         void (*callback)(int))
1222 {
1223         struct vme_bridge *bridge = find_bridge(resource);
1224         struct vme_lm_resource *lm;
1225
1226         if (resource->type != VME_LM) {
1227                 printk(KERN_ERR "Not a Location Monitor resource\n");
1228                 return -EINVAL;
1229         }
1230
1231         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1232
1233         if (bridge->lm_attach == NULL) {
1234                 printk(KERN_ERR "vme_lm_attach not supported\n");
1235                 return -EINVAL;
1236         }
1237
1238         return bridge->lm_attach(lm, monitor, callback);
1239 }
1240 EXPORT_SYMBOL(vme_lm_attach);
1241
1242 int vme_lm_detach(struct vme_resource *resource, int monitor)
1243 {
1244         struct vme_bridge *bridge = find_bridge(resource);
1245         struct vme_lm_resource *lm;
1246
1247         if (resource->type != VME_LM) {
1248                 printk(KERN_ERR "Not a Location Monitor resource\n");
1249                 return -EINVAL;
1250         }
1251
1252         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1253
1254         if (bridge->lm_detach == NULL) {
1255                 printk(KERN_ERR "vme_lm_detach not supported\n");
1256                 return -EINVAL;
1257         }
1258
1259         return bridge->lm_detach(lm, monitor);
1260 }
1261 EXPORT_SYMBOL(vme_lm_detach);
1262
1263 void vme_lm_free(struct vme_resource *resource)
1264 {
1265         struct vme_lm_resource *lm;
1266
1267         if (resource->type != VME_LM) {
1268                 printk(KERN_ERR "Not a Location Monitor resource\n");
1269                 return;
1270         }
1271
1272         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1273
1274         mutex_lock(&(lm->mtx));
1275
1276         /* XXX
1277          * Check to see that there aren't any callbacks still attached, if
1278          * there are we should probably be detaching them!
1279          */
1280
1281         lm->locked = 0;
1282
1283         mutex_unlock(&(lm->mtx));
1284
1285         kfree(resource);
1286 }
1287 EXPORT_SYMBOL(vme_lm_free);
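
/*
 * Illustrative sketch, not part of the framework: configuring a location
 * monitor at a hypothetical A24 address and attaching a callback to the
 * first monitored location. Attribute flags are assumed to come from vme.h.
 */
static void example_lm_callback(int monitor)
{
        printk(KERN_INFO "Location monitor %d triggered\n", monitor);
}

static int example_setup_lm(struct device *dev)
{
        struct vme_resource *res;
        int retval;

        res = vme_lm_request(dev);
        if (res == NULL)
                return -ENOMEM;

        /* Monitor accesses starting at A24 address 0x100000 */
        retval = vme_lm_set(res, 0x100000, VME_A24,
                VME_SCT | VME_USER | VME_DATA);
        if (retval)
                goto err;

        retval = vme_lm_attach(res, 0, example_lm_callback);
        if (retval)
                goto err;

        /* A real driver would keep res and detach/free it on remove */
        return 0;

err:
        vme_lm_free(res);
        return retval;
}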
1288
1289 int vme_slot_get(struct device *bus)
1290 {
1291         struct vme_bridge *bridge;
1292
1293         bridge = dev_to_bridge(bus);
1294         if (bridge == NULL) {
1295                 printk(KERN_ERR "Can't find VME bus\n");
1296                 return -EINVAL;
1297         }
1298
1299         if (bridge->slot_get == NULL) {
1300                 printk("vme_slot_get not supported\n");
1301                 return -EINVAL;
1302         }
1303
1304         return bridge->slot_get();
1305 }
1306 EXPORT_SYMBOL(vme_slot_get);
1307
1308
1309 /* - Bridge Registration --------------------------------------------------- */
1310
1311 static int vme_alloc_bus_num(void)
1312 {
1313         int i;
1314
1315         mutex_lock(&vme_bus_num_mtx);
1316         for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1317                 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1318                         vme_bus_numbers |= (0x1 << i);
1319                         break;
1320                 }
1321         }
1322         mutex_unlock(&vme_bus_num_mtx);
1323
1324         return i;
1325 }
1326
1327 static void vme_free_bus_num(int bus)
1328 {
1329         mutex_lock(&vme_bus_num_mtx);
1330         vme_bus_numbers &= ~(0x1 << bus);
1331         mutex_unlock(&vme_bus_num_mtx);
1332 }
1333
1334 int vme_register_bridge (struct vme_bridge *bridge)
1335 {
1336         struct device *dev;
1337         int retval;
1338         int i;
1339
1340         bridge->num = vme_alloc_bus_num();
1341
1342         /* This creates 32 vme "slot" devices. This equates to a slot for each
1343          * ID available in a system conforming to the ANSI/VITA 1-1994
1344          * specification.
1345          */
1346         for (i = 0; i < VME_SLOTS_MAX; i++) {
1347                 dev = &(bridge->dev[i]);
1348                 memset(dev, 0, sizeof(struct device));
1349
1350                 dev->parent = bridge->parent;
1351                 dev->bus = &(vme_bus_type);
1352                 /*
1353                  * We save a pointer to the bridge in platform_data so that we
1354                  * can get to it later. We keep driver_data for use by the
1355                  * driver that binds against the slot
1356                  */
1357                 dev->platform_data = bridge;
1358                 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1359
1360                 retval = device_register(dev);
1361                 if(retval)
1362                         goto err_reg;
1363         }
1364
1365         return retval;
1366
1367 err_reg:
1368         /* Only unregister the devices that were successfully registered */
1369         while (--i >= 0) {
1370                 dev = &(bridge->dev[i]);
1371                 device_unregister(dev);
1372         }
1373         vme_free_bus_num(bridge->num);
1374         return retval;
1375 }
1376 EXPORT_SYMBOL(vme_register_bridge);
1377
1378 void vme_unregister_bridge (struct vme_bridge *bridge)
1379 {
1380         int i;
1381         struct device *dev;
1382
1383
1384         for (i = 0; i < VME_SLOTS_MAX; i++) {
1385                 dev = &(bridge->dev[i]);
1386                 device_unregister(dev);
1387         }
1388         vme_free_bus_num(bridge->num);
1389 }
1390 EXPORT_SYMBOL(vme_unregister_bridge);
1391
1392
1393 /* - Driver Registration --------------------------------------------------- */
1394
1395 int vme_register_driver (struct vme_driver *drv)
1396 {
1397         drv->driver.name = drv->name;
1398         drv->driver.bus = &vme_bus_type;
1399
1400         return driver_register(&drv->driver);
1401 }
1402 EXPORT_SYMBOL(vme_register_driver);
1403
1404 void vme_unregister_driver (struct vme_driver *drv)
1405 {
1406         driver_unregister(&drv->driver);
1407 }
1408 EXPORT_SYMBOL(vme_unregister_driver);
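
/*
 * Illustrative sketch, not part of the framework: a minimal VME device
 * driver that binds against slot 1 on bus 0. The bind table entry type is
 * assumed here to be struct vme_device_id from vme.h; the probe/remove
 * prototypes follow the calls made from vme_bus_probe() and
 * vme_bus_remove() below. Treat the names as assumptions, not a reference
 * implementation.
 */
static int example_probe(struct device *dev, int bus, int slot)
{
        dev_info(dev, "probed on bus %d, slot %d\n", bus, slot);
        return 0;
}

static int example_remove(struct device *dev, int bus, int slot)
{
        return 0;
}

static struct vme_device_id example_ids[] = {
        { .bus = 0, .slot = 1 },
        { },    /* A zero-filled entry terminates the table (see vme_bus_match) */
};

static struct vme_driver example_driver = {
        .name = "vme_example",
        .bind_table = example_ids,
        .probe = example_probe,
        .remove = example_remove,
};

/* Registered from the driver's module init, e.g.: */
static int __init example_init(void)
{
        return vme_register_driver(&example_driver);
}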
1409
1410 /* - Bus Registration ------------------------------------------------------ */
1411
1412 int vme_calc_slot(struct device *dev)
1413 {
1414         struct vme_bridge *bridge;
1415         int num;
1416
1417         bridge = dev_to_bridge(dev);
1418
1419         /* Determine slot number */
1420         num = 0;
1421         while(num < VME_SLOTS_MAX) {
1422                 if(&(bridge->dev[num]) == dev) {
1423                         break;
1424                 }
1425                 num++;
1426         }
1427         if (num == VME_SLOTS_MAX) {
1428                 dev_err(dev, "Failed to identify slot\n");
1429                 num = 0;
1430                 goto err_dev;
1431         }
1432         num++;
1433
1434 err_dev:
1435         return num;
1436 }
1437
1438 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1439 {
1440         if(dev->driver == NULL)
1441                 printk(KERN_ERR "dev->driver is NULL\n");
1442
1443         return container_of(dev->driver, struct vme_driver, driver);
1444 }
1445
1446 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1447 {
1448         struct vme_bridge *bridge;
1449         struct vme_driver *driver;
1450         int i, num;
1451
1452         bridge = dev_to_bridge(dev);
1453         driver = container_of(drv, struct vme_driver, driver);
1454
1455         num = vme_calc_slot(dev);
1456         if (!num)
1457                 goto err_dev;
1458
1459         if (driver->bind_table == NULL) {
1460                 dev_err(dev, "Bind table NULL\n");
1461                 goto err_table;
1462         }
1463
1464         i = 0;
1465         while((driver->bind_table[i].bus != 0) ||
1466                 (driver->bind_table[i].slot != 0)) {
1467
1468                 if (bridge->num == driver->bind_table[i].bus) {
1469                         if (num == driver->bind_table[i].slot)
1470                                 return 1;
1471
1472                         if (driver->bind_table[i].slot == VME_SLOT_ALL)
1473                                 return 1;
1474
1475                         if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1476                                 (num == vme_slot_get(dev)))
1477                                 return 1;
1478                 }
1479                 i++;
1480         }
1481
1482 err_dev:
1483 err_table:
1484         return 0;
1485 }
1486
1487 static int vme_bus_probe(struct device *dev)
1488 {
1489         struct vme_bridge *bridge;
1490         struct vme_driver *driver;
1491         int retval = -ENODEV;
1492
1493         driver = dev_to_vme_driver(dev);
1494         bridge = dev_to_bridge(dev);
1495
1496         if(driver->probe != NULL) {
1497                 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1498         }
1499
1500         return retval;
1501 }
1502
1503 static int vme_bus_remove(struct device *dev)
1504 {
1505         struct vme_bridge *bridge;
1506         struct vme_driver *driver;
1507         int retval = -ENODEV;
1508
1509         driver = dev_to_vme_driver(dev);
1510         bridge = dev_to_bridge(dev);
1511
1512         if(driver->remove != NULL) {
1513                 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1514         }
1515
1516         return retval;
1517 }
1518
1519 struct bus_type vme_bus_type = {
1520         .name = "vme",
1521         .match = vme_bus_match,
1522         .probe = vme_bus_probe,
1523         .remove = vme_bus_remove,
1524 };
1525 EXPORT_SYMBOL(vme_bus_type);
1526
1527 static int __init vme_init (void)
1528 {
1529         return bus_register(&vme_bus_type);
1530 }
1531
1532 static void __exit vme_exit (void)
1533 {
1534         bus_unregister(&vme_bus_type);
1535 }
1536
1537 MODULE_DESCRIPTION("VME bridge driver framework");
1538 MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
1539 MODULE_LICENSE("GPL");
1540
1541 module_init(vme_init);
1542 module_exit(vme_exit);