/* arch/powerpc/kernel/iommu.c */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);

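/*
 * Allocate a contiguous range of IOMMU page slots from tbl->it_map.
 *
 * The table is split at it_halfpoint: allocations of more than 15 pages
 * are served from the upper part only, so that small allocations do not
 * fragment the space reserved for large ones.  it_hint/it_largehint cache
 * the end of the previous allocation to avoid rescanning the bitmap from
 * the start on every call.  Must be called with tbl->it_lock held.
 *
 * Returns the index of the first allocated slot, or DMA_ERROR_CODE on
 * failure.
 */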
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;

        align_mask = 0xffffffffffffffffUL >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only half of the table for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);
        if (n == -1) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

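/*
 * Allocate npages of IOMMU space for the buffer at *page and program the
 * hardware TCEs through ppc_md.tce_build(), returning the resulting bus
 * address.  Takes tbl->it_lock itself; returns DMA_ERROR_CODE if either
 * the range allocation or the TCE build fails.
 */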
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              struct dma_attrs *attrs)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = ppc_md.tce_build(tbl, entry, npages,
                                      (unsigned long)page & IOMMU_PAGE_MASK,
                                      direction, attrs);

        /* ppc_md.tce_build() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);

                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

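/*
 * Free a previously allocated range: clear the hardware TCEs and the
 * corresponding bits in tbl->it_map.  Validates that dma_addr/npages
 * actually lie within the table before touching anything.  Lockless;
 * the caller must hold tbl->it_lock.
 */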
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);
        bitmap_clear(tbl->it_map, free_entry, npages);
}

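/*
 * Locked wrapper around __iommu_free() that also flushes the TCE TLB
 * when the platform requires it.
 */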
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

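/*
 * Map a scatterlist for DMA.  Each element gets its own range of IOMMU
 * pages; when virtual merging is enabled ("iommu=vmerge", the default),
 * elements whose bus addresses come out contiguous are coalesced into a
 * single segment as long as the device's max segment size allows it.
 * Returns the number of mapped segments, or 0 on failure.
 */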
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 unsigned long mask, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %lx npages %lu\n", tbl, vaddr,
                                         npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = ppc_md.tce_build(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK,
                                              direction, attrs);
                if (unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         * - the merged segment would exceed the device's max segment size
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %u\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}

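/*
 * Undo iommu_map_sg(): walk the list up to the first zero-length entry
 * (the terminator written by iommu_map_sg on a partial fill) and free
 * each mapped range.
 */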
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

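/*
 * On a normal boot, wipe whatever the firmware left in the table.  In a
 * kdump kernel the first kernel's mappings may still be live, so they are
 * preserved in the bitmap instead; if that leaves too few free entries,
 * the top KDUMP_MIN_TCE_ENTRIES slots are forcibly reclaimed.
 */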
static void iommu_table_clear(struct iommu_table *tbl)
{
        if (!is_kdump_kernel()) {
                /* Clear the table in case firmware left allocations in it */
                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

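/*
 * Tear down a table built by iommu_init_table(): warn about (but still
 * release) any TCEs left marked in the bitmap, then free the bitmap
 * pages and the iommu_table itself.
 */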
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
                                node_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                                __func__, node_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          struct dma_attrs *attrs)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit())  {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %p npages %d\n", tbl, vaddr,
                                         npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}

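/*
 * Reverse of iommu_map_page(): size and dma_handle must match the
 * original mapping so the same number of IOMMU pages is freed.
 */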
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      struct dma_attrs *attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_coherent size too large: 0x%zx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

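/*
 * Free a buffer obtained from iommu_alloc_coherent(): unmap the IOMMU
 * pages first, then return the underlying pages to the page allocator.
 */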
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}