drivers/gpu/drm/nouveau/nouveau_sgdma.c
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
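
/*
 * Per-backend state: the device, the DMA addresses of the pages TTM
 * handed us, and where in the GART aperture they are currently bound
 * (if at all).
 */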
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        unsigned nr_pages;

        u64 offset;
        bool bound;
};
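
/*
 * populate: DMA-map each page through the PCI layer.  On a mapping
 * failure, ->clear() releases the pages mapped so far.
 */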
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

        if (nvbe->pages)
                return -EINVAL;

        nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
        if (!nvbe->pages)
                return -ENOMEM;

        nvbe->nr_pages = 0;
        while (num_pages--) {
                nvbe->pages[nvbe->nr_pages] =
                        pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev,
                                          nvbe->pages[nvbe->nr_pages])) {
                        be->func->clear(be);
                        return -EFAULT;
                }

                nvbe->nr_pages++;
        }

        return 0;
}
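
/*
 * clear: undo populate - unbind if still bound, then DMA-unmap and
 * free the page array.
 */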
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev;

        if (nvbe && nvbe->pages) {
                dev = nvbe->dev;
                NV_DEBUG(dev, "\n");

                if (nvbe->bound)
                        be->func->unbind(be);

                while (nvbe->nr_pages--) {
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
                kfree(nvbe->pages);
                nvbe->pages = NULL;
                nvbe->nr_pages = 0;
        }
}
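
/*
 * Pre-NV50 bind: write one 32-bit PTE per 4KiB (NV_CTXDMA_PAGE_SIZE)
 * chunk into the ctxdma object.  The "+ 2" skips the two-word ctxdma
 * header written by nouveau_sgdma_init(); ORing 3 into the address
 * sets the PTE's low flag bits (valid/present; the exact bit semantics
 * are hardware-specific).  When the CPU page size exceeds 4KiB, each
 * backing page covers several consecutive PTEs, so the DMA address
 * must be advanced for every entry.
 */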
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0,
                                lower_32_bits(dma_offset) | 3);
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }

        nvbe->bound = true;
        return 0;
}
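
/*
 * Pre-NV50 unbind: zero the same PTE range that bind filled in.
 */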
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (!nvbe->bound)
                return 0;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        nvbe->bound = false;
        return 0;
}
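
/*
 * destroy: release any remaining mappings, then free the backend
 * itself.
 */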
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (nvbe) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe->pages)
                        be->func->clear(be);
                kfree(nvbe);
        }
}
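
/*
 * NV50+ bind: no per-entry PTE writes here; the pages are mapped
 * through the shared GART VMA reserved in nouveau_sgdma_init().
 */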
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        nvbe->offset = mem->start << PAGE_SHIFT;

        nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
                          nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
        nvbe->bound = true;
        return 0;
}
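
/*
 * NV50+ unbind: tear down the VM mappings established by
 * nv50_sgdma_bind().
 */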
static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        if (!nvbe->bound)
                return 0;

        nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
                            nvbe->nr_pages << PAGE_SHIFT);
        nvbe->bound = false;
        return 0;
}
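
/* TTM backend vtables: pre-NV50 cards use the ctxdma path, NV50+ the VM path. */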
static struct ttm_backend_func nouveau_sgdma_backend = {
        .populate               = nouveau_sgdma_populate,
        .clear                  = nouveau_sgdma_clear,
        .bind                   = nouveau_sgdma_bind,
        .unbind                 = nouveau_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

static struct ttm_backend_func nv50_sgdma_backend = {
        .populate               = nouveau_sgdma_populate,
        .clear                  = nouveau_sgdma_clear,
        .bind                   = nv50_sgdma_bind,
        .unbind                 = nv50_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};
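
/*
 * Allocate a backend instance for TTM and pick the vtable that matches
 * the card generation.
 */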
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        if (dev_priv->card_type < NV_50)
                nvbe->backend.func = &nouveau_sgdma_backend;
        else
                nvbe->backend.func = &nv50_sgdma_backend;
        return &nvbe->backend;
}
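
/*
 * One-time GART setup.  Pre-NV50 this builds an NV_CLASS_DMA_IN_MEMORY
 * ctxdma covering the aperture: one 32-bit PTE per 4KiB page plus an
 * 8-byte header, e.g. a 64MiB aperture needs (64MiB >> 12) * 4 = 64KiB
 * of PTEs.  On NV50+ a 512MiB window is simply reserved in the channel
 * VM and pages are mapped into it at bind time.
 */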
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;

        if (dev_priv->card_type < NV_50) {
                if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
                        aper_size = 64 * 1024 * 1024;
                else
                        aper_size = 512 * 1024 * 1024;

                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
                obj_size += 8; /* ctxdma header */

                ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++)
                        nv_wo32(gpuobj, i * 4, 0x00000000);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
        } else if (dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
                                     12, NV_MEM_ACCESS_RW,
                                     &dev_priv->gart_info.vma);
                if (ret)
                        return ret;

                dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
                dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
        }

        dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
        return 0;
}
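
/* Tear down the GART state created by nouveau_sgdma_init(). */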
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
        nouveau_vm_put(&dev_priv->gart_info.vma);
}
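
/*
 * Translate a GART offset back to the bus address stored in the
 * ctxdma PTE: read the PTE for the page, mask off the flag bits, and
 * re-add the offset within the page.  Pre-NV50 only, since NV50+ has
 * no ctxdma to consult.
 */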
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}