drivers/xen/xen-pciback/vpci.c
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 *               to the frontend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32
#define DRV_NAME	"xen-pciback"

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	spinlock_t lock;
};

static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}

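/*
 * Look up the real pci_dev backing a given virtual domain/bus/devfn.
 * The virtual bus is always domain 0, bus 0: the slot number indexes
 * dev_list and the function number is matched against the real
 * device's devfn.  Returns NULL if the address is not on the virtual
 * bus or no device is assigned there.
 */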
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		spin_lock_irqsave(&vpci_dev->lock, flags);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		spin_unlock_irqrestore(&vpci_dev->lock, flags);
	}
	return dev;
}

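/*
 * Two devices belong in the same virtual slot if they share the same
 * physical domain, bus and slot, i.e. they are functions of one
 * multi-function device.
 */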
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}

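/*
 * Add a device to the virtual PCI bus.  Functions of a multi-function
 * device are grouped into the virtual slot that already holds their
 * siblings; otherwise the first free slot is used.  Bridges cannot be
 * exported.  On success the device is published to the frontend via
 * publish_cb at virtual domain 0, bus 0.
 */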
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	spin_lock_irqsave(&vpci_dev->lock, flags);

	/* Keep multi-function devices together on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (!list_empty(&vpci_dev->dev_list[slot])) {
			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info(DRV_NAME ": vpci: %s: assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			pr_info(DRV_NAME ": vpci: %s: assign to virtual slot %d\n",
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			func = PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	/* No free virtual slot: drop the unused entry so it isn't leaked. */
	err = -ENOMEM;
	kfree(dev_entry);
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	spin_unlock_irqrestore(&vpci_dev->lock, flags);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);

out:
	return err;
}

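/*
 * Remove a device from the virtual bus and hand it back to the pci
 * stub.  The entry is unlinked and freed under the lock; the stub
 * release itself happens after the spinlock is dropped.
 */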
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&vpci_dev->lock, flags);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	spin_unlock_irqrestore(&vpci_dev->lock, flags);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}

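/*
 * Allocate the per-pdev virtual bus state: one device list per virtual
 * slot, all protected by a single spinlock.
 */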
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	spin_lock_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}

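/* The virtual PCI bus exposes a single root, at domain 0, bus 0. */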
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}

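/*
 * Tear down the virtual bus: release every assigned device back to the
 * pci stub and free the per-pdev state.
 */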
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			list_del(&e->list);
			pcistub_put_pci_dev(e->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}

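/*
 * Reverse lookup: given a real pci_dev, find where it appears on the
 * virtual bus and report the virtual domain/bus/devfn that the
 * frontend sees.  Returns 1 if the device was found, 0 otherwise.
 */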
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;
	int found = 0, slot;

	spin_lock_irqsave(&vpci_dev->lock, flags);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[slot],
				    list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
			    && pci_domain_nr(dev->bus) ==
					pci_domain_nr(pcidev->bus)
			    && dev->devfn == pcidev->devfn) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
						   PCI_FUNC(pcidev->devfn));
			}
		}
	}
	spin_unlock_irqrestore(&vpci_dev->lock, flags);
	return found;
}

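/*
 * Operations implementing the "vpci" flavour of xen-pciback, in which
 * passed-through devices appear on a single flat virtual bus.
 */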
struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};