/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device the mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;
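
/*
 * Mapping updates are batched per CPU: a caller opens a batch with
 * iommu_batch_start(), feeds physical page addresses in through
 * iommu_batch_add(), and closes it with iommu_batch_end().  Each flush
 * hands up to PGLIST_NENTS pages to the hypervisor in a single
 * pci_sun4v_iommu_map() call.  Interrupts must remain disabled from
 * start to end so the per-CPU batch state cannot be corrupted.
 */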

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
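/*
 * pci_sun4v_iommu_map() may map fewer pages than requested; the loop
 * below resubmits the remainder, advancing the TSB entry index and the
 * page list by however many pages the hypervisor accepted.
 */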
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* first_page was converted to a physical address above; recover
	 * the virtual address before freeing, as free_pages() expects one.
	 */
	first_page = (unsigned long) ret;

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
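
/*
 * A minimal sketch of how this path is reached via the generic DMA API
 * (hypothetical driver code; "pdev" is some struct pci_dev):
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, 8192, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, 8192, buf, dma);
 */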

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
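	/* DMA_TO_DEVICE only requires the device to read from memory,
	 * so the WRITE attribute is added for every other direction.
	 */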
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length so we can back out on failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
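	/*
	 * Walk the scatterlist, allocating IOTSB entries for each
	 * segment and merging adjacent segments whenever the DMA
	 * addresses are contiguous, the combined length fits within
	 * max_seg_size, and the merge would not cross the device's
	 * segment boundary.
	 */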
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with
			 *   the previous allocation,
			 * - adding the segment would exceed max_seg_size, or
			 * - the merged segment would span a segment boundary.
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

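/*
 * The sun4v DMA operations table: it backs the generic DMA API on this
 * platform and is installed as the global dma_ops when the first PBM
 * is probed (see pci_sun4v_probe() below).
 */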
static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

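/*
 * Import IOTSB entries the firmware (OBP) left established: mappings
 * whose target page is still in the kernel's available-memory list are
 * demapped, while the remainder (e.g. firmware-owned pages) are kept
 * and marked in-use in the arena bitmap.
 */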
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
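	/* One bit per TSB entry, rounded up to an 8-byte multiple.  */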
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kzalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code, where:
	 *	bits 39:32 are the bus/device/fn of the msg target-id
	 *	bits 18:16 are the message routing code
	 *	bits 7:0 are the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
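
/*
 * An illustrative sketch (not used below) of pulling the requester id
 * apart with the masks above, for some entry "ep":
 *
 *	unsigned long bus  = (ep->req_id & MSIQ_REQID_BUS_MASK) >>
 *			     MSIQ_REQID_BUS_SHIFT;
 *	unsigned long dev  = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >>
 *			     MSIQ_REQID_DEVICE_SHIFT;
 *	unsigned long func = (ep->req_id & MSIQ_REQID_FUNC_MASK) >>
 *			     MSIQ_REQID_FUNC_SHIFT;
 */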

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

688                                  unsigned long msiqid, unsigned long *head,
689                                  unsigned long *msi)
690 {
691         struct pci_sun4v_msiq_entry *ep;
692         unsigned long err, type;
693
694         /* Note: void pointer arithmetic, 'head' is a byte offset  */
695         ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
696                                  (pbm->msiq_ent_count *
697                                   sizeof(struct pci_sun4v_msiq_entry))) +
698               *head);
699
700         if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
701                 return 0;
702
703         type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
704         if (unlikely(type != MSIQ_TYPE_MSI32 &&
705                      type != MSIQ_TYPE_MSI64))
706                 return -EINVAL;
707
708         *msi = ep->msi_data;
709
710         err = pci_sun4v_msi_setstate(pbm->devhandle,
711                                      ep->msi_data /* msi_num */,
712                                      HV_MSISTATE_IDLE);
713         if (unlikely(err))
714                 return -ENXIO;
715
716         /* Clear the entry.  */
717         ep->version_type &= ~MSIQ_TYPE_MASK;
718
719         (*head) += sizeof(struct pci_sun4v_msiq_entry);
720         if (*head >=
721             (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
722                 *head = 0;
723
724         return 1;
725 }
726
727 static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
728                               unsigned long head)
729 {
730         unsigned long err;
731
732         err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
733         if (unlikely(err))
734                 return -EINVAL;
735
736         return 0;
737 }
738
739 static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
740                                unsigned long msi, int is_msi64)
741 {
742         if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
743                                   (is_msi64 ?
744                                    HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
745                 return -ENXIO;
746         if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
747                 return -ENXIO;
748         if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
749                 return -ENXIO;
750         return 0;
751 }
752
753 static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
754 {
755         unsigned long err, msiqid;
756
757         err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
758         if (err)
759                 return -ENXIO;
760
761         pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
762
763         return 0;
764 }
765
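/*
 * All event queues of a PBM live in one physically contiguous
 * allocation.  Each queue is registered with the hypervisor and its
 * configuration read back to verify that it took effect.
 */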
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

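	/*
	 * The PCI hypervisor API group is negotiated once, on the first
	 * probe; the sun4v DMA ops are installed at the same time.
	 */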
	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);