/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME     "pci_sun4v"
#define PFX             DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

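/* IOMMU mappings are not programmed one entry at a time.  Translations
 * are accumulated in a per-cpu, page-sized list of physical page
 * addresses and handed to the hypervisor in a single
 * pci_sun4v_iommu_map() call.  PGLIST_NENTS is simply how many u64
 * entries fit in that one page.
 */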
#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

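/* Flush the pending batch if the caller is about to map an IOTSB entry
 * that is not contiguous with the batch built up so far.  ~0UL is the
 * "no batch open yet" sentinel that dma_4v_map_sg() passes to
 * iommu_batch_start().
 */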
static inline void iommu_batch_new_entry(unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
                iommu_batch_flush(p);
        p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

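/* Consistent (coherent) DMA allocation: grab pages on the device's
 * NUMA node, carve a matching range out of the IOTSB, then batch-map
 * every page with both read and write attributes.
 */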
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        unsigned long flags, order, first_page, npages, n;
        unsigned long base_paddr;
        struct iommu *iommu;
        struct page *page;
        void *ret;
        long entry;
        int nid;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;

        /* Keep the physical base in a separate variable; the error
         * paths below must hand the virtual address in first_page
         * back to free_pages().
         */
        base_paddr = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(base_paddr + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

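/* Tear down a consistent allocation: release the DVMA range, then ask
 * the hypervisor to demap the IOTSB entries.  The demap call may do
 * fewer entries than requested, hence the loop.
 */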
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

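/* Streaming mapping of a single page range.  Write permission is
 * withheld for DMA_TO_DEVICE, so the device cannot scribble on a
 * buffer it is only supposed to read.
 */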
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

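/* Mirror image of dma_4v_map_page(): free the DVMA range and demap the
 * IOTSB entries, again looping because the hypervisor reports how many
 * entries it actually demapped.
 */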
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, bus_addr, npages);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

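/* Map a scatterlist.  Each segment gets its own DVMA range, the
 * hardware table is programmed through the per-cpu batch, and adjacent
 * segments are merged into one DMA segment whenever the allocated
 * addresses are contiguous and neither the device's maximum segment
 * size nor its segment boundary would be violated.
 */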
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
        unsigned long base_shift;
        long err;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        if (nelems == 0 || !iommu)
                return 0;

        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_batch_start(dev, prot, ~0UL);

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                iommu_batch_new_entry(entry);

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        err = iommu_batch_add(paddr);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        err = iommu_batch_end();

        if (unlikely(err < 0L))
                goto iommu_map_failed;

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
                        iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

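/* Unmap a scatterlist: walk the entries up to the first zero-length
 * one, freeing each DVMA range and demapping its IOTSB entries.
 */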
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
        unsigned long flags;
        u32 devhandle;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                while (npages) {
                        unsigned long num;

                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                    npages);
                        entry += num;
                        npages -= num;
                }

                sg = sg_next(sg);
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}

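/* DMA on sun4v PCI is consistent as far as the kernel is concerned;
 * there is no CPU cache flushing to do here, so the sync hooks are
 * empty.
 */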
static void dma_4v_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_page                       = dma_4v_map_page,
        .unmap_page                     = dma_4v_unmap_page,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
        .sync_single_for_cpu            = dma_4v_sync_single_for_cpu,
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
};

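/* A rough usage sketch (not part of this file): drivers never call
 * these entry points directly.  They go through the generic DMA API,
 * which dispatches via the dma_ops pointer installed at probe time,
 * e.g.:
 *
 *      dma_addr_t ba = dma_map_single(&pdev->dev, buf, len,
 *                                     DMA_TO_DEVICE);
 *      ...
 *      dma_unmap_single(&pdev->dev, ba, len, DMA_TO_DEVICE);
 *
 * which ends up in dma_4v_map_page()/dma_4v_unmap_page() above.
 */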
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
                                         struct device *parent)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->op->node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

        /* XXX register error interrupt handlers XXX */
}

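/* Firmware may leave IOMMU translations live at boot.  Entries whose
 * real address is not in the kernel's available physical memory are
 * assumed to still be in use and are preserved by marking them in the
 * arena map; everything else is demapped.
 */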
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
                                                      struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
        struct iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 dma_mask, dma_offset;
        const u32 *vdma;

        vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
        if (!vdma)
                vdma = vdma_default;

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
                       vdma[0], vdma[1]);
                return -EINVAL;
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                printk(KERN_ERR PFX "Error, kzalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);

        return 0;
}

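/* The hypervisor delivers MSI/MSI-X and PCI messages through per-PBM
 * event queues (MSIQs).  Each 64-byte queue entry has the layout
 * described by the structure below.
 */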
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code where:
         *      bits 39:32 is the bus/device/fn of the msg target-id
         *      bits 18:16 is the message routing code
         *      bits 7:0 is the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

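/* All MSI queues are carved out of one physically contiguous
 * allocation.  Each queue is registered with the hypervisor and then
 * read back to verify that the configuration actually took.
 */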
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

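/* Per-PBM setup: wire up config space accessors, the IOMMU and (when
 * configured) MSI support, then scan the bus and link the PBM into the
 * global list.
 */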
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
                                        struct of_device *op, u32 devhandle)
{
        struct device_node *dp = op->node;
        int err;

        pbm->numa_node = of_node_to_nid(dp);

        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->op = op;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);
        printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);

        err = pci_sun4v_iommu_init(pbm);
        if (err)
                return err;

        pci_sun4v_msi_init(pbm);

        pci_sun4v_scan_bus(pbm, &op->dev);

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        return 0;
}

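/* Top-level probe.  The first invocation negotiates the PCI hypervisor
 * API group and installs sun4v_dma_ops as the system DMA operations; it
 * also allocates the per-cpu pglist pages used by the mapping batches
 * above.
 */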
static int __devinit pci_sun4v_probe(struct of_device *op,
                                     const struct of_device_id *match)
{
        const struct linux_prom64_registers *regs;
        static int hvapi_negotiated = 0;
        struct pci_pbm_info *pbm;
        struct device_node *dp;
        struct iommu *iommu;
        u32 devhandle;
        int i, err;

        dp = op->node;

        if (!hvapi_negotiated++) {
                err = sun4v_hvapi_register(HV_GRP_PCI,
                                           vpci_major,
                                           &vpci_minor);

                if (err) {
                        printk(KERN_ERR PFX "Could not register hvapi, "
                               "err=%d\n", err);
                        return err;
                }
                printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        regs = of_get_property(dp, "reg", NULL);
        err = -ENODEV;
        if (!regs) {
                printk(KERN_ERR PFX "Could not find config registers\n");
                goto out_err;
        }
        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        err = -ENOMEM;
        if (!iommu_batch_initialized) {
                for_each_possible_cpu(i) {
                        unsigned long page = get_zeroed_page(GFP_KERNEL);

                        if (!page)
                                goto out_err;

                        per_cpu(iommu_batch, i).pglist = (u64 *) page;
                }
                iommu_batch_initialized = 1;
        }

        pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
        if (!pbm) {
                printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
                goto out_err;
        }

        iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
        if (!iommu) {
                printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
                goto out_free_controller;
        }

        pbm->iommu = iommu;

        err = pci_sun4v_pbm_init(pbm, op, devhandle);
        if (err)
                goto out_free_iommu;

        dev_set_drvdata(&op->dev, pbm);

        return 0;

out_free_iommu:
        kfree(pbm->iommu);

out_free_controller:
        kfree(pbm);

out_err:
        return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
        {
                .name = "pci",
                .compatible = "SUNW,sun4v-pci",
        },
        {},
};

static struct of_platform_driver pci_sun4v_driver = {
        .name           = DRIVER_NAME,
        .match_table    = pci_sun4v_match,
        .probe          = pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
        return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);