arch/s390/pci/pci_dma.c
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

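/*
 * Allocate a region/segment table from its kmem cache and initialize
 * every entry as invalid and protected; dma_alloc_page_table() does
 * the same for the lowest translation level.
 */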
static unsigned long *dma_alloc_cpu_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
        return table;
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}

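/*
 * Return the segment table a region table entry points to, allocating
 * and hooking up a new one if the entry is still invalid.
 * dma_get_page_table_origin() does the same one level down.
 */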
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
        unsigned long *sto;

        if (reg_entry_isvalid(*entry))
                sto = get_rt_sto(*entry);
        else {
                sto = dma_alloc_cpu_table();
                if (!sto)
                        return NULL;

                set_rt_sto(entry, sto);
                validate_rt_entry(entry);
                entry_clr_protected(entry);
        }
        return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
        unsigned long *pto;

        if (reg_entry_isvalid(*entry))
                pto = get_st_pto(*entry);
        else {
                pto = dma_alloc_page_table();
                if (!pto)
                        return NULL;
                set_st_pto(entry, pto);
                validate_st_entry(entry);
                entry_clr_protected(entry);
        }
        return pto;
}

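/*
 * Walk the CPU copy of the translation tables (region table -> segment
 * table -> page table) for a DMA address, creating missing intermediate
 * tables on the way, and return a pointer to the page table entry.
 */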
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx]);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx]);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}

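/*
 * Update a single page table entry: either invalidate it or point it
 * at page_addr and apply the requested protection bits.
 */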
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
                                 dma_addr_t dma_addr, int flags)
{
        unsigned long *entry;

        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
        if (!entry) {
                WARN_ON_ONCE(1);
                return;
        }

        if (flags & ZPCI_PTE_INVALID) {
                invalidate_pt_entry(entry);
                return;
        } else {
                set_pt_pfaa(entry, page_addr);
                validate_pt_entry(entry);
        }

        if (flags & ZPCI_TABLE_PROTECTED)
                entry_set_protected(entry);
        else
                entry_clr_protected(entry);
}

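/*
 * Update the translation entries for a whole DMA range and, where the
 * device requires it, refresh its I/O TLB via RPCIT.
 */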
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                            dma_addr_t dma_addr, size_t size, int flags)
{
        unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        u8 *page_addr = (u8 *) (pa & PAGE_MASK);
        dma_addr_t start_dma_addr = dma_addr;
        unsigned long irq_flags;
        int i, rc = 0;

        if (!nr_pages)
                return -EINVAL;

        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
        if (!zdev->dma_table) {
                dev_err(&zdev->pdev->dev, "Missing DMA table\n");
                goto no_refresh;
        }

        for (i = 0; i < nr_pages; i++) {
                dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
                page_addr += PAGE_SIZE;
                dma_addr += PAGE_SIZE;
        }

        /*
         * rpcit is not required to establish new translations when previously
         * invalid translation-table entries are validated, however it is
         * required when altering previously valid entries.
         */
        if (!zdev->tlb_refresh &&
            ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
                /*
                 * TODO: also need to check that the old entry is indeed INVALID
                 * and not only for one page but for the whole range...
                 * -> now we WARN_ON in that case but with lazy unmap that
                 * needs to be redone!
                 */
                goto no_refresh;
        rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
                          nr_pages * PAGE_SIZE);

no_refresh:
        spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
        return rc;
}

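/*
 * Table teardown: dma_free_seg_table() releases all page tables below
 * one segment table, dma_cleanup_tables() walks the region table and
 * releases the complete hierarchy including the region table itself.
 */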
static void dma_free_seg_table(unsigned long entry)
{
        unsigned long *sto = get_rt_sto(entry);
        int sx;

        for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
                if (reg_entry_isvalid(sto[sx]))
                        dma_free_page_table(get_st_pto(sto[sx]));

        dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
        unsigned long *table;
        int rtx;

        if (!zdev || !zdev->dma_table)
                return;

        table = zdev->dma_table;
        for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                if (reg_entry_isvalid(table[rtx]))
                        dma_free_seg_table(table[rtx]);

        dma_free_cpu_table(table);
        zdev->dma_table = NULL;
}

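/*
 * IOMMU address space management: a simple bitmap allocator over the
 * device's DMA aperture.  Allocation continues from where the previous
 * search ended (next_bit) and wraps around to the start once before
 * giving up.
 */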
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
                                   int size)
{
        unsigned long boundary_size = 0x1000000;

        return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
        unsigned long offset, flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
        if (offset == -1)
                offset = __dma_alloc_iommu(zdev, 0, size);

        if (offset != -1) {
                zdev->next_bit = offset + size;
                if (zdev->next_bit >= zdev->iommu_pages)
                        zdev->next_bit = 0;
        }
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
        return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        if (!zdev->iommu_bitmap)
                goto out;
        bitmap_clear(zdev->iommu_bitmap, offset, size);
        if (offset >= zdev->next_bit)
                zdev->next_bit = offset + size;
out:
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

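/*
 * Map a contiguous range for DMA: reserve IOMMU pages, program the
 * translation table and return the resulting bus address.
 */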
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
        unsigned long nr_pages, iommu_page_index;
        unsigned long pa = page_to_phys(page) + offset;
        int flags = ZPCI_PTE_VALID;
        dma_addr_t dma_addr;

        WARN_ON_ONCE(offset > PAGE_SIZE);

        /* This rounds up number of pages based on size and offset */
        nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
        iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
        if (iommu_page_index == -1)
                goto out_err;

        /* Use rounded up size */
        size = nr_pages * PAGE_SIZE;

        dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
        if (dma_addr + size > zdev->end_dma) {
                dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
                         dma_addr, size, zdev->end_dma);
                goto out_free;
        }

        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;

        if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
                atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
                return dma_addr + offset;
        }

out_free:
        dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
        dev_err(dev, "Failed to map addr: %lx\n", pa);
        return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
                                 struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
        unsigned long iommu_page_index;
        int npages;

        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                             ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
                dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

        atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
        iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
        dma_free_iommu(zdev, iommu_page_index, npages);
}

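/*
 * Coherent allocations: grab pages, zero them and map them
 * bidirectionally through the normal page mapping path.
 */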
static void *s390_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag,
                            struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
        struct page *page;
        unsigned long pa;
        dma_addr_t map;

        size = PAGE_ALIGN(size);
        page = alloc_pages(flag, get_order(size));
        if (!page)
                return NULL;

        atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
        pa = page_to_phys(page);
        memset((void *) pa, 0, size);

        map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
                                 size, DMA_BIDIRECTIONAL, NULL);
        if (dma_mapping_error(dev, map)) {
                free_pages(pa, get_order(size));
                return NULL;
        }

        if (dma_handle)
                *dma_handle = map;
        return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
                          void *pa, dma_addr_t dma_handle,
                          struct dma_attrs *attrs)
{
        s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
                             DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long) pa, get_order(size));
}

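/*
 * Scatter-gather support is built on the single-page mapping
 * primitives; if any element fails to map, everything mapped so far
 * is unmapped again and zero is returned.
 */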
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                           int nr_elements, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        int mapped_elements = 0;
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                struct page *page = sg_page(s);
                s->dma_address = s390_dma_map_pages(dev, page, s->offset,
                                                    s->length, dir, NULL);
                if (!dma_mapping_error(dev, s->dma_address)) {
                        s->dma_length = s->length;
                        mapped_elements++;
                } else
                        goto unmap;
        }
out:
        return mapped_elements;

unmap:
        for_each_sg(sg, s, mapped_elements, i) {
                if (s->dma_address)
                        s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
                                             dir, NULL);
                s->dma_address = 0;
                s->dma_length = 0;
        }
        mapped_elements = 0;
        goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                              int nr_elements, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
                s->dma_address = 0;
                s->dma_length = 0;
        }
}

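/*
 * Per-device setup: allocate the root translation table and the IOMMU
 * bitmap, then register the I/O address translation parameters for the
 * PCI function.
 */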
int zpci_dma_init_device(struct zpci_dev *zdev)
{
        unsigned int bitmap_order;
        int rc;

        spin_lock_init(&zdev->iommu_bitmap_lock);
        spin_lock_init(&zdev->dma_table_lock);

        zdev->dma_table = dma_alloc_cpu_table();
        if (!zdev->dma_table) {
                rc = -ENOMEM;
                goto out_clean;
        }

        zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
        zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
        bitmap_order = get_order(zdev->iommu_pages / 8);
        pr_info("iommu_size: 0x%lx  iommu_pages: 0x%lx  bitmap_order: %i\n",
                 zdev->iommu_size, zdev->iommu_pages, bitmap_order);

        zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                       bitmap_order);
        if (!zdev->iommu_bitmap) {
                rc = -ENOMEM;
                goto out_reg;
        }

        rc = zpci_register_ioat(zdev,
                                0,
                                zdev->start_dma + PAGE_OFFSET,
                                zdev->start_dma + zdev->iommu_size - 1,
                                (u64) zdev->dma_table);
        if (rc)
                goto out_reg;
        return 0;

out_reg:
        dma_free_cpu_table(zdev->dma_table);
out_clean:
        return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
        zpci_unregister_ioat(zdev, 0);
        dma_cleanup_tables(zdev);
        free_pages((unsigned long) zdev->iommu_bitmap,
                   get_order(zdev->iommu_pages / 8));
        zdev->iommu_bitmap = NULL;
        zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
        dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
                                        ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
                                        0, NULL);
        if (!dma_region_table_cache)
                return -ENOMEM;

        dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
                                        ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
                                        0, NULL);
        if (!dma_page_table_cache) {
                kmem_cache_destroy(dma_region_table_cache);
                return -ENOMEM;
        }
        return 0;
}

int __init zpci_dma_init(void)
{
        return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
        kmem_cache_destroy(dma_page_table_cache);
        kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES      (1 << 16)

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

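/* DMA operations used for all zPCI devices, hooked up via dma_map_ops. */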
struct dma_map_ops s390_dma_ops = {
        .alloc          = s390_dma_alloc,
        .free           = s390_dma_free,
        .map_sg         = s390_dma_map_sg,
        .unmap_sg       = s390_dma_unmap_sg,
        .map_page       = s390_dma_map_pages,
        .unmap_page     = s390_dma_unmap_pages,
        /* if we support direct DMA this must be conditional */
        .is_phys        = 0,
        /* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);