/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/scatterlist.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

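/* Allocate a contiguous range of 'npages' entries from the arena
 * bitmap.  The search starts at the rotating allocation hint and
 * makes at most two passes (hint to limit, then 0 to hint) before
 * giving up.  Returns the first entry index, or -1 on failure.
 * Called with iommu->lock held.
 */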
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
                       unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

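/* Allocate a consistent DMA buffer: get pages from the page
 * allocator, reserve a matching run of IOTSB entries from the
 * arena, then map each page through the batched hypervisor
 * iommu-map call with both read and write attributes.
 */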
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
        void *ret;
        long entry;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        first_page = __get_free_pages(gfp, order);
        if (unlikely(first_page == 0UL))
                return NULL;

        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto arena_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

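/* Free a consistent DMA buffer: return the IOTSB entries to the
 * arena, demap them through the hypervisor (which may handle
 * fewer pages per call than requested, hence the loop), and
 * release the pages.
 */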
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

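/* Map a single buffer for streaming DMA.  The buffer need not be
 * page aligned; the sub-page offset of the original address is
 * folded back into the returned bus address.
 */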
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
                                    enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

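/* Undo dma_4v_map_single: free the arena entries covering the
 * mapping and demap them through the hypervisor.
 */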
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
                                size_t sz, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

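/* Program the IOTSB entries for a prepared scatterlist.  'nused'
 * is the number of coalesced DMA segments and 'nelems' the number
 * of original entries; the inner loops walk the physical areas
 * backing each DMA segment and feed their page addresses to the
 * mapping batch.  Returns 0 on success, -1 if a map call fails.
 */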
static inline long fill_sg(long entry, struct device *dev,
                           struct scatterlist *sg,
                           int nused, int nelems, unsigned long prot)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg_last(sg, nelems);
        unsigned long flags;
        int i;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg = sg_next(sg);
                        }

                        pteval = (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                long err;

                                err = iommu_batch_add(pteval);
                                if (unlikely(err < 0L))
                                        goto iommu_map_failed;

                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg = sg_next(sg);

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                if (sg == sg_end)
                                        break;
                                sg = sg_next(sg);
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg = sg_next(dma_sg);
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_failed;

        local_irq_restore(flags);
        return 0;

iommu_map_failed:
        local_irq_restore(flags);
        return -1L;
}

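/* Map a scatterlist for streaming DMA.  Single-entry lists take
 * the dma_4v_map_single() fast path.  Otherwise the list is
 * coalesced by prepare_sg(), backed by a single arena allocation,
 * and the mappings are created by fill_sg().  Returns the number
 * of DMA segments actually used, or 0 on error.
 */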
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, prot;
        u32 dma_base;
        struct scatterlist *sgtmp;
        long entry, err;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        dma_4v_map_single(dev,
                                          (page_address(sglist->page) +
                                           sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        /* Step 1: Prepare scatter list. */
        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster and context, if necessary. */
        spin_lock_irqsave(&iommu->lock, flags);
        entry = arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        dma_base = iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp = sg_next(sgtmp);
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        err = fill_sg(entry, dev, sglist, used, nelems, prot);
        if (unlikely(err < 0L))
                goto iommu_map_failed;

        return used;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;

iommu_map_failed:
        spin_lock_irqsave(&iommu->lock, flags);
        arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

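/* Undo dma_4v_map_sg: derive the mapped bus address range from
 * the first and last used entries, then free and demap it.
 */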
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, i, npages;
        struct scatterlist *sg, *sgprv;
        long entry;
        u32 devhandle, bus_addr;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;

                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        /* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_single                     = dma_4v_map_single,
        .unmap_single                   = dma_4v_unmap_single,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
        .sync_single_for_cpu            = dma_4v_sync_single_for_cpu,
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->prom_node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm);

        /* XXX register error interrupt handlers XXX */
}

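/* Scan the IOTSB for mappings left behind by OBP.  Entries whose
 * target lies in usable RAM are demapped; the rest are preserved
 * and marked busy in the arena.  Returns the number of entries
 * preserved.
 */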
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
                                            struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

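/* Set up the software IOMMU state for one PBM: read the DVMA
 * window base and size from the "virtual-dma" property (with
 * defaults if absent), size the arena bitmap to match, and import
 * any translations already programmed by OBP.
 */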
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct iommu *iommu = pbm->iommu;
        struct property *prop;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 vdma[2], dma_mask, dma_offset;

        prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
        if (prop) {
                u32 *val = prop->value;

                vdma[0] = val[0];
                vdma[1] = val[1];
        } else {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
                            vdma[0], vdma[1]);
                prom_halt();
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code where:
         *      bits 39:32 is the bus/device/fn of the msg target-id
         *      bits 18:16 is the message routing code
         *      bits 7:0 is the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

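/* Pull one MSI off an event queue.  Returns 1 and advances *head
 * (a byte offset into the queue) if an MSI was dequeued, 0 if the
 * current entry is empty, or a negative errno on error.
 */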
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

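/* Allocate all MSI event queues for a PBM as one physically
 * contiguous block and register each queue with the hypervisor,
 * reading the configuration back to verify it took effect.
 */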
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

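/* Initialize one PCI bus module.  Bit 6 of the device handle
 * distinguishes PBM B from PBM A.  This wires up the config space
 * accessors, probes the MEM/IO spaces, and brings up the IOMMU
 * and MSI support.
 */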
static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
        struct pci_pbm_info *pbm;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        pbm->scan_bus = pci_sun4v_scan_bus;
        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->parent = p;
        pbm->prom_node = dp;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);
        pci_sun4v_iommu_init(pbm);
        pci_sun4v_msi_init(pbm);
}

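/* Top-level probe for a SUN4V PCI controller node.  The first
 * invocation negotiates the PCI hypervisor API group and installs
 * sun4v_dma_ops.  Sibling PBMs (device handles differing only in
 * bit 6) share a single pci_controller_info.
 */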
void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
        static int hvapi_negotiated = 0;
        struct pci_controller_info *p;
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        struct property *prop;
        struct linux_prom64_registers *regs;
        u32 devhandle;
        int i;

        if (!hvapi_negotiated++) {
                int err = sun4v_hvapi_register(HV_GRP_PCI,
                                               vpci_major,
                                               &vpci_minor);

                if (err) {
                        prom_printf("SUN4V_PCI: Could not register hvapi, "
                                    "err=%d\n", err);
                        prom_halt();
                }
                printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        prop = of_find_property(dp, "reg", NULL);
        regs = prop->value;

        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
                        return;
                }
        }

        for_each_possible_cpu(i) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(iommu_batch, i).pglist = (u64 *) page;
        }

        p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_A.iommu = iommu;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_B.iommu = iommu;

        pci_sun4v_pbm_init(p, dp, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}