staging: gma500: delete the RAR handling
drivers/staging/gma500/psb_gtt.c
/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 */

#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_pvr_glue.h"

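/*
 * psb_gtt_mask_pte - build a GTT PTE value for a page frame number
 *
 * Shifts @pfn into the address field and ORs in PSB_PTE_VALID plus the
 * cached, read-only or write-only attribute bits requested by @type.
 */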
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}

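/*
 * psb_gtt_alloc - allocate and initialise the GTT bookkeeping structure
 *
 * Returns a zeroed struct psb_gtt bound to @dev with its semaphore
 * initialised, or NULL if the allocation fails.
 */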
struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
{
        struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);

        if (!tmp)
                return NULL;

        init_rwsem(&tmp->sem);
        tmp->dev = dev;

        return tmp;
}

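/*
 * psb_gtt_takedown - undo psb_gtt_init
 *
 * Unmaps the GTT page table, restores the saved GMCH control word and
 * PGETBL control register, and frees @pg when @free is non-zero.
 */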
void psb_gtt_takedown(struct psb_gtt *pg, int free)
{
        struct drm_psb_private *dev_priv;

        if (!pg)
                return;

        /* dev_priv is referenced by the PSB_WVDC32/PSB_RVDC32 macros below,
         * so fetch it only after the NULL check. */
        dev_priv = pg->dev->dev_private;

        if (pg->gtt_map) {
                iounmap(pg->gtt_map);
                pg->gtt_map = NULL;
        }
        if (pg->initialized) {
                pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
                                      pg->gmch_ctrl);
                PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
                (void) PSB_RVDC32(PSB_PGETBL_CTL);
        }
        if (free)
                kfree(pg);
}

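/*
 * psb_gtt_init - enable the GTT and populate its entries
 *
 * Saves and enables the GMCH and page-table control registers, maps the
 * GTT page table and the stolen memory, then points the stolen and CI
 * ranges at their physical pages and fills the remainder of the aperture
 * with the scratch page.  @resume is set when restoring state after
 * suspend.
 */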
int psb_gtt_init(struct psb_gtt *pg, int resume)
{
        struct drm_device *dev = pg->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned gtt_pages;
        unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
        unsigned i, num_pages;
        unsigned pfn_base;
        uint32_t ci_pages, vram_pages;
        uint32_t tt_pages;
        uint32_t *ttm_gtt_map;
        uint32_t dvmt_mode = 0;

        int ret = 0;
        uint32_t pte;

        pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
        pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
                              pg->gmch_ctrl | _PSB_GMCH_ENABLED);

        pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
        PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
        (void) PSB_RVDC32(PSB_PGETBL_CTL);

        pg->initialized = 1;

        pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;

        pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
        /* FIXME: the video MMU has a HW bug when accessing 0x0D0000000,
         * so make the GATT start at 0x0E0000000 instead */
        pg->mmu_gatt_start = PSB_MEM_TT_START;
        pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
        gtt_pages =
            pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
        pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
            >> PAGE_SHIFT;

        pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
        vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;

        /* CI is not included in the stolen size since the TOPAZ MMU bug */
        ci_stolen_size = dev_priv->ci_region_size;
        /* Don't add CI & RAR share buffer space
         * managed by TTM to stolen_size */
        stolen_size = vram_stolen_size;

        printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
                pg->gatt_start, pg->gatt_pages/256);
        printk(KERN_INFO "GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
                pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
        printk(KERN_INFO "Stolen memory information\n");
        printk(KERN_INFO "      base in RAM: 0x%x\n", pg->stolen_base);
        printk(KERN_INFO "      size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
                vram_stolen_size/1024);
        dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
        printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
                (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);

        if (ci_stolen_size > 0)
                printk(KERN_INFO "CI stolen memory: RAM base = 0x%08x, size = %lu M\n",
                                dev_priv->ci_region_start,
                                ci_stolen_size / 1024 / 1024);

        if (resume && (gtt_pages != pg->gtt_pages) &&
            (stolen_size != pg->stolen_size)) {
                DRM_ERROR("GTT resume error.\n");
                ret = -EINVAL;
                goto out_err;
        }

        pg->gtt_pages = gtt_pages;
        pg->stolen_size = stolen_size;
        pg->vram_stolen_size = vram_stolen_size;
        pg->ci_stolen_size = ci_stolen_size;
        pg->gtt_map =
            ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
        if (!pg->gtt_map) {
                DRM_ERROR("Failure to map gtt.\n");
                ret = -ENOMEM;
                goto out_err;
        }

        pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
        if (!pg->vram_addr) {
                DRM_ERROR("Failure to map stolen base.\n");
                ret = -ENOMEM;
                goto out_err;
        }

        DRM_DEBUG("%s: vram kernel virtual address %p\n", __func__,
                  pg->vram_addr);

        tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
                (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

        ttm_gtt_map = pg->gtt_map + tt_pages / 2;

        /*
         * insert vram stolen pages.
         */

        pfn_base = pg->stolen_base >> PAGE_SHIFT;
        vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
        printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
                num_pages, pfn_base, 0);
        for (i = 0; i < num_pages; ++i) {
                pte = psb_gtt_mask_pte(pfn_base + i, 0);
                iowrite32(pte, pg->gtt_map + i);
        }

        /*
         * Init rest of gtt managed by IMG.
         */
        pfn_base = page_to_pfn(dev_priv->scratch_page);
        pte = psb_gtt_mask_pte(pfn_base, 0);
        for (; i < tt_pages / 2 - 1; ++i)
                iowrite32(pte, pg->gtt_map + i);

        /*
         * insert CI stolen pages
         */

        pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
        ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
        printk(KERN_INFO "Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
               num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
        for (i = 0; i < num_pages; ++i) {
                pte = psb_gtt_mask_pte(pfn_base + i, 0);
                iowrite32(pte, ttm_gtt_map + i);
        }

        /*
         * Init rest of gtt managed by TTM.
         */

        pfn_base = page_to_pfn(dev_priv->scratch_page);
        pte = psb_gtt_mask_pte(pfn_base, 0);
        PSB_DEBUG_INIT("Initializing the rest of a total "
                       "of %d gtt pages.\n", pg->gatt_pages);

        for (; i < pg->gatt_pages - tt_pages / 2; ++i)
                iowrite32(pte, ttm_gtt_map + i);
        (void) ioread32(pg->gtt_map + i - 1);

        return 0;

out_err:
        psb_gtt_takedown(pg, 0);
        return ret;
}

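/*
 * psb_gtt_insert_pages - write GTT PTEs for an array of struct page pointers
 *
 * Maps @num_pages pages starting at GTT page offset @offset_pages with the
 * attribute bits in @type.  When @hw_tile_stride is set the pages are
 * written as rows of @desired_tile_stride entries.
 */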
int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
                         unsigned offset_pages, unsigned num_pages,
                         unsigned desired_tile_stride,
                         unsigned hw_tile_stride, int type)
{
        unsigned rows = 1;
        unsigned add;
        unsigned row_add;
        unsigned i;
        unsigned j;
        uint32_t *cur_page = NULL;
        uint32_t pte;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride;
        row_add = hw_tile_stride;

        down_read(&pg->sem);
        for (i = 0; i < rows; ++i) {
                cur_page = pg->gtt_map + offset_pages;
                for (j = 0; j < desired_tile_stride; ++j) {
                        pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
                        iowrite32(pte, cur_page++);
                }
                offset_pages += add;
        }
        (void) ioread32(cur_page - 1);
        up_read(&pg->sem);

        return 0;
}

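/*
 * psb_gtt_insert_phys_addresses - write GTT PTEs from physical addresses
 *
 * Like psb_gtt_insert_pages() but takes a flat array of bus addresses in
 * @pPhysFrames instead of struct page pointers; used by the PVR memory
 * mapping path below.
 */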
int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, dma_addr_t *pPhysFrames,
                        unsigned offset_pages, unsigned num_pages, int type)
{
        unsigned j;
        uint32_t *cur_page = NULL;
        uint32_t pte;
        u32 ba;

        down_read(&pg->sem);
        cur_page = pg->gtt_map + offset_pages;
        for (j = 0; j < num_pages; ++j) {
                ba = *pPhysFrames++;
                pte = psb_gtt_mask_pte(ba >> PAGE_SHIFT, type);
                iowrite32(pte, cur_page++);
        }
        (void) ioread32(cur_page - 1);
        up_read(&pg->sem);
        return 0;
}

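/*
 * psb_gtt_remove_pages - point a GTT range back at the scratch page
 *
 * Rewrites the PTEs of the range with the scratch page so it no longer
 * references the old backing pages.  @rc_prot selects whether the GTT
 * semaphore is taken around the update.
 */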
int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
                         unsigned num_pages, unsigned desired_tile_stride,
                         unsigned hw_tile_stride, int rc_prot)
{
        struct drm_psb_private *dev_priv = pg->dev->dev_private;
        unsigned rows = 1;
        unsigned add;
        unsigned row_add;
        unsigned i;
        unsigned j;
        uint32_t *cur_page = NULL;
        unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
        uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride;
        row_add = hw_tile_stride;

        if (rc_prot)
                down_read(&pg->sem);
        for (i = 0; i < rows; ++i) {
                cur_page = pg->gtt_map + offset_pages;
                for (j = 0; j < desired_tile_stride; ++j)
                        iowrite32(pte, cur_page++);

                offset_pages += add;
        }
        (void) ioread32(cur_page - 1);
        if (rc_prot)
                up_read(&pg->sem);

        return 0;
}

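/*
 * psb_gtt_mm_init - set up the GTT range manager
 *
 * Allocates the psb_gtt_mm state, creates the per-process hash table and
 * initialises a drm_mm covering the IMG half of the TT aperture, from the
 * end of stolen memory up to tt_size / 2 pages.
 */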
int psb_gtt_mm_init(struct psb_gtt *pg)
{
        struct psb_gtt_mm *gtt_mm;
        struct drm_psb_private *dev_priv;
        struct drm_open_hash *ht;
        struct drm_mm *mm;
        int ret;
        uint32_t tt_start;
        uint32_t tt_size;

        if (!pg || !pg->initialized) {
                DRM_DEBUG("Invalid gtt struct\n");
                return -EINVAL;
        }

        dev_priv = pg->dev->dev_private;

        gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
        if (!gtt_mm)
                return -ENOMEM;

        spin_lock_init(&gtt_mm->lock);

        ht = &gtt_mm->hash;
        ret = drm_ht_create(ht, 20);
        if (ret) {
                DRM_DEBUG("Create hash table failed(%d)\n", ret);
                goto err_free;
        }

        tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
        tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
                        (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

        mm = &gtt_mm->base;

        /*will use tt_start ~ 128M for IMG TT buffers*/
        ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
        if (ret) {
                DRM_DEBUG("drm_mm_init error(%d)\n", ret);
                goto err_mm_init;
        }

        gtt_mm->count = 0;

        dev_priv->gtt_mm = gtt_mm;

        DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
                (unsigned long)tt_start,
                (unsigned long)((tt_size / 2) - tt_start));
        return 0;
err_mm_init:
        drm_ht_remove(ht);

err_free:
        kfree(gtt_mm);
        return ret;
}

/**
 * Tear down the GTT range manager.
 *
 * Currently a no-op: per-process hash entries are freed as their last
 * mappings are removed.
 */
void psb_gtt_mm_takedown(void)
{
}

static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
                                    u32 tgid,
                                    struct psb_gtt_hash_entry **hentry)
{
        struct drm_hash_item *entry;
        struct psb_gtt_hash_entry *psb_entry;
        int ret;

        ret = drm_ht_find_item(&mm->hash, tgid, &entry);
        if (ret) {
                DRM_DEBUG("Cannot find entry pid=%u\n", tgid);
                return ret;
        }

        psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
        if (!psb_entry) {
                DRM_DEBUG("Invalid entry\n");
                return -EINVAL;
        }

        *hentry = psb_entry;
        return 0;
}


static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
                                       u32 tgid,
                                       struct psb_gtt_hash_entry *hentry)
{
        struct drm_hash_item *item;
        int ret;

        if (!hentry) {
                DRM_DEBUG("Invalid parameters\n");
                return -EINVAL;
        }

        item = &hentry->item;
        item->key = tgid;

        /**
         * NOTE: drm_ht_insert_item will perform such a check
        ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
        if (!ret) {
                DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
                return -EAGAIN;
        }
        */

        /*Insert the given entry*/
        ret = drm_ht_insert_item(&mm->hash, item);
        if (ret) {
                DRM_DEBUG("Insert failure\n");
                return ret;
        }

        mm->count++;

        return 0;
}

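/*
 * psb_gtt_mm_alloc_insert_ht - look up or create the hash entry for a tgid
 *
 * Returns the existing per-process entry if one is already hashed for
 * @tgid, otherwise allocates a new entry with its own mapping hash table
 * and inserts it under @mm->lock.
 */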
static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
                                      u32 tgid,
                                      struct psb_gtt_hash_entry **entry)
{
        struct psb_gtt_hash_entry *hentry;
        int ret;

        /*if the hentry for this tgid exists, just get it and return*/
        spin_lock(&mm->lock);
        ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
        if (!ret) {
                DRM_DEBUG("Entry for tgid %u exists, hentry %p\n",
                          tgid, hentry);
                *entry = hentry;
                spin_unlock(&mm->lock);
                return 0;
        }
        spin_unlock(&mm->lock);

        DRM_DEBUG("Entry for tgid %u doesn't exist, will create it\n", tgid);

        hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
        if (!hentry) {
                DRM_DEBUG("kzalloc failed\n");
                return -ENOMEM;
        }

        ret = drm_ht_create(&hentry->ht, 20);
        if (ret) {
                DRM_DEBUG("Create hash table failed\n");
                kfree(hentry);
                return ret;
        }

        spin_lock(&mm->lock);
        ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
        spin_unlock(&mm->lock);

        if (!ret)
                *entry = hentry;

        return ret;
}

static struct psb_gtt_hash_entry *
psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
{
        struct psb_gtt_hash_entry *tmp;
        int ret;

        ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
        if (ret) {
                DRM_DEBUG("Cannot find entry pid %u\n", tgid);
                return NULL;
        }

        /*remove it from ht*/
        drm_ht_remove_item(&mm->hash, &tmp->item);

        mm->count--;

        return tmp;
}

static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
{
        struct psb_gtt_hash_entry *entry;

        entry = psb_gtt_mm_remove_ht_locked(mm, tgid);

        if (!entry) {
                DRM_DEBUG("Invalid entry\n");
                return -EINVAL;
        }

        /*delete ht*/
        drm_ht_remove(&entry->ht);

        /*free this entry*/
        kfree(entry);
        return 0;
}

static int
psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
                           u32 key,
                           struct psb_gtt_mem_mapping **hentry)
{
        struct drm_hash_item *entry;
        struct psb_gtt_mem_mapping *mapping;
        int ret;

        ret = drm_ht_find_item(ht, key, &entry);
        if (ret) {
                DRM_DEBUG("Cannot find key %u\n", key);
                return ret;
        }

        mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
        if (!mapping) {
                DRM_DEBUG("Invalid entry\n");
                return -EINVAL;
        }

        *hentry = mapping;
        return 0;
}

static int
psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
                              u32 key,
                              struct psb_gtt_mem_mapping *hentry)
{
        struct drm_hash_item *item;
        struct psb_gtt_hash_entry *entry;
        int ret;

        if (!hentry) {
                DRM_DEBUG("hentry is NULL\n");
                return -EINVAL;
        }

        item = &hentry->item;
        item->key = key;

        ret = drm_ht_insert_item(ht, item);
        if (ret) {
                DRM_DEBUG("insert_item failed\n");
                return ret;
        }

        entry = container_of(ht, struct psb_gtt_hash_entry, ht);
        if (entry)
                entry->count++;

        return 0;
}

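/*
 * psb_gtt_mm_alloc_insert_mem_mapping - look up or create a mapping entry
 *
 * Returns the existing psb_gtt_mem_mapping hashed under @key in the
 * per-process table @ht, or allocates one that records @node and inserts
 * it under @mm->lock.
 */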
static int
psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
                                    struct drm_open_hash *ht,
                                    u32 key,
                                    struct drm_mm_node *node,
                                    struct psb_gtt_mem_mapping **entry)
{
        struct psb_gtt_mem_mapping *mapping;
        int ret;

        if (!node || !ht) {
                DRM_DEBUG("parameter error\n");
                return -EINVAL;
        }

        /*try to get this mem_map */
        spin_lock(&mm->lock);
        ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
        if (!ret) {
                DRM_DEBUG("mapping entry for key %u exists, entry %p\n",
                          key, mapping);
                *entry = mapping;
                spin_unlock(&mm->lock);
                return 0;
        }
        spin_unlock(&mm->lock);

        DRM_DEBUG("Mapping entry for key %u doesn't exist, will create it\n",
                  key);

        mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
        if (!mapping) {
                DRM_DEBUG("kzalloc failed\n");
                return -ENOMEM;
        }

        mapping->node = node;

        spin_lock(&mm->lock);
        ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
        spin_unlock(&mm->lock);

        if (!ret)
                *entry = mapping;

        return ret;
}

static struct psb_gtt_mem_mapping *
psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key)
{
        struct psb_gtt_mem_mapping *tmp;
        struct psb_gtt_hash_entry *entry;
        int ret;

        ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
        if (ret) {
                DRM_DEBUG("Cannot find key %u\n", key);
                return NULL;
        }

        drm_ht_remove_item(ht, &tmp->item);

        entry = container_of(ht, struct psb_gtt_hash_entry, ht);
        if (entry)
                entry->count--;

        return tmp;
}

static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
                                              u32 key,
                                              struct drm_mm_node **node)
{
        struct psb_gtt_mem_mapping *entry;

        entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
        if (!entry) {
                DRM_DEBUG("entry is NULL\n");
                return -EINVAL;
        }

        *node = entry->node;

        kfree(entry);
        return 0;
}

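/*
 * psb_gtt_add_node - record a drm_mm allocation for a process and handle
 *
 * Finds (or creates) the hash entry for @tgid and hashes @node under @key
 * inside it, returning the resulting mapping through @entry.
 */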
static int psb_gtt_add_node(struct psb_gtt_mm *mm,
                            u32 tgid,
                            u32 key,
                            struct drm_mm_node *node,
                            struct psb_gtt_mem_mapping **entry)
{
        struct psb_gtt_hash_entry *hentry;
        struct psb_gtt_mem_mapping *mapping;
        int ret;

        ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
        if (ret) {
                DRM_DEBUG("alloc_insert failed\n");
                return ret;
        }

        ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
                                                  &hentry->ht,
                                                  key,
                                                  node,
                                                  &mapping);
        if (ret) {
                DRM_DEBUG("mapping alloc_insert failed\n");
                return ret;
        }

        *entry = mapping;

        return 0;
}

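/*
 * psb_gtt_remove_node - drop the mapping recorded for @tgid/@key
 *
 * Looks up the process hash entry, unhashes and frees the mapping for
 * @key and returns its drm_mm_node through @node.  When the entry's last
 * mapping goes away the per-process hash table is freed as well.
 */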
static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
                               u32 tgid,
                               u32 key,
                               struct drm_mm_node **node)
{
        struct psb_gtt_hash_entry *hentry;
        struct drm_mm_node *tmp;
        int ret;

        spin_lock(&mm->lock);
        ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
        if (ret) {
                DRM_DEBUG("Cannot find entry for pid %u\n", tgid);
                spin_unlock(&mm->lock);
                return ret;
        }
        spin_unlock(&mm->lock);

        /*remove mapping entry*/
        spin_lock(&mm->lock);
        ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
                                                        key,
                                                        &tmp);
        if (ret) {
                DRM_DEBUG("remove_free failed\n");
                spin_unlock(&mm->lock);
                return ret;
        }

        *node = tmp;

        /*check the count of mapping entry*/
        if (!hentry->count) {
                DRM_DEBUG("count of mapping entry is zero, tgid=%u\n", tgid);
                psb_gtt_mm_remove_free_ht_locked(mm, tgid);
        }

        spin_unlock(&mm->lock);

        return 0;
}

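/*
 * psb_gtt_mm_alloc_mem - carve @pages pages out of the TT range manager
 *
 * Pre-allocates drm_mm node memory outside the spinlock (drm_mm_pre_get),
 * then searches for and claims a free block atomically under @mm->lock.
 */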
static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
                                uint32_t pages,
                                uint32_t align,
                                struct drm_mm_node **node)
{
        struct drm_mm_node *tmp_node;
        int ret;

        do {
                ret = drm_mm_pre_get(&mm->base);
                if (unlikely(ret)) {
                        DRM_DEBUG("drm_mm_pre_get error\n");
                        return ret;
                }

                spin_lock(&mm->lock);
                tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
                if (unlikely(!tmp_node)) {
                        DRM_DEBUG("No free node found\n");
                        spin_unlock(&mm->lock);
                        break;
                }

                tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
                spin_unlock(&mm->lock);
        } while (!tmp_node);

        if (!tmp_node) {
                DRM_DEBUG("Node allocation failed\n");
                return -ENOMEM;
        }

        *node = tmp_node;
        return 0;
}

static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
{
        spin_lock(&mm->lock);
        drm_mm_put_block(node);
        spin_unlock(&mm->lock);
}

int psb_gtt_map_meminfo(struct drm_device *dev,
                        void *hKernelMemInfo,
                        uint32_t *offset)
{
        return -EINVAL;
        /* FIXMEAC */
#if 0
        struct drm_psb_private *dev_priv
               = (struct drm_psb_private *)dev->dev_private;
        void *psKernelMemInfo;
        struct psb_gtt_mm *mm = dev_priv->gtt_mm;
        struct psb_gtt *pg = dev_priv->pg;
        uint32_t size, pages, offset_pages;
        void *kmem;
        struct drm_mm_node *node;
        struct page **page_list;
        struct psb_gtt_mem_mapping *mapping = NULL;
        int ret;

        ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
        if (ret) {
                DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
                          hKernelMemInfo);
                return -EINVAL;
        }

        DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
                  psKernelMemInfo, (u32)hKernelMemInfo);
        size = psKernelMemInfo->ui32AllocSize;
        kmem = psKernelMemInfo->pvLinAddrKM;
        pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
                  size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle);

        if (!kmem)
                DRM_DEBUG("kmem is NULL");

        /*get pages*/
        ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle,
                                          &page_list);
        if (ret) {
                DRM_DEBUG("get pages error\n");
                return ret;
        }

        DRM_DEBUG("get %ld pages\n", pages);

        /*alloc memory in TT aperture*/
        ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node);
        if (ret) {
                DRM_DEBUG("alloc TT memory error\n");
                goto failed_pages_alloc;
        }

        /*update psb_gtt_mm*/
        ret = psb_gtt_add_node(mm,
                               task_tgid_nr(current),
                               (u32)hKernelMemInfo,
                               node,
                               &mapping);
        if (ret) {
                DRM_DEBUG("add_node failed");
                goto failed_add_node;
        }

        node = mapping->node;
        offset_pages = node->start;

        DRM_DEBUG("get free node for %ld pages, offset %ld pages",
                  pages, offset_pages);

        /*update gtt*/
        psb_gtt_insert_pages(pg, page_list,
                             (unsigned)offset_pages,
                             (unsigned)pages,
                             0,
                             0,
                             0);

        *offset = offset_pages;
        return 0;

failed_add_node:
        psb_gtt_mm_free_mem(mm, node);
failed_pages_alloc:
        kfree(page_list);
        return ret;
#endif
}

int psb_gtt_unmap_meminfo(struct drm_device *dev, void *hKernelMemInfo)
{
        struct drm_psb_private *dev_priv
               = (struct drm_psb_private *)dev->dev_private;
        struct psb_gtt_mm *mm = dev_priv->gtt_mm;
        struct psb_gtt *pg = dev_priv->pg;
        uint32_t pages, offset_pages;
        struct drm_mm_node *node;
        int ret;

        ret = psb_gtt_remove_node(mm,
                        task_tgid_nr(current),
                        (u32)hKernelMemInfo,
                        &node);
        if (ret) {
                DRM_DEBUG("remove node failed\n");
                return ret;
        }

        /*remove gtt entries*/
        offset_pages = node->start;
        pages = node->size;

        psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);

        /*free tt node*/
        psb_gtt_mm_free_mem(mm, node);
        return 0;
}

int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct psb_gtt_mapping_arg *arg
               = (struct psb_gtt_mapping_arg *)data;
        uint32_t *offset_pages = &arg->offset_pages;

        DRM_DEBUG("\n");

        return psb_gtt_map_meminfo(dev, arg->hKernelMemInfo, offset_pages);
}

int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct psb_gtt_mapping_arg *arg
               = (struct psb_gtt_mapping_arg *)data;

        DRM_DEBUG("\n");

        return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
}

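/*
 * psb_gtt_map_pvr_memory - map a PVR services allocation into the GTT
 *
 * Allocates a block of @ui32PagesNum GTT pages, records it against
 * @ui32TaskId/@hHandle in the range manager, writes the physical pages
 * from @pPages into the GTT and returns the page offset in @ui32Offset.
 */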
int psb_gtt_map_pvr_memory(struct drm_device *dev, unsigned int hHandle,
                        unsigned int ui32TaskId, dma_addr_t *pPages,
                        unsigned int ui32PagesNum, unsigned int *ui32Offset)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_gtt_mm *mm = dev_priv->gtt_mm;
        struct psb_gtt *pg = dev_priv->pg;
        uint32_t size, pages, offset_pages;
        struct drm_mm_node *node = NULL;
        struct psb_gtt_mem_mapping *mapping = NULL;
        int ret;

        size = ui32PagesNum * PAGE_SIZE;
        pages = 0;

        /*alloc memory in TT aperture*/
        ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
        if (ret) {
                DRM_DEBUG("alloc TT memory error\n");
                goto failed_pages_alloc;
        }

        /*update psb_gtt_mm*/
        ret = psb_gtt_add_node(mm,
                               (u32)ui32TaskId,
                               (u32)hHandle,
                               node,
                               &mapping);
        if (ret) {
                DRM_DEBUG("add_node failed");
                goto failed_add_node;
        }

        node = mapping->node;
        offset_pages = node->start;

        DRM_DEBUG("get free node for %u pages, offset %u pages",
                  pages, offset_pages);

        /*update gtt*/
        psb_gtt_insert_phys_addresses(pg, pPages, (unsigned)offset_pages,
                                                (unsigned)ui32PagesNum, 0);

        *ui32Offset = offset_pages;
        return 0;

failed_add_node:
        psb_gtt_mm_free_mem(mm, node);
failed_pages_alloc:
        return ret;
}

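/*
 * psb_gtt_unmap_pvr_memory - undo psb_gtt_map_pvr_memory
 *
 * Removes the @ui32TaskId/@hHandle record from the range manager, points
 * the freed GTT range back at the scratch page and releases the node.
 */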
int psb_gtt_unmap_pvr_memory(struct drm_device *dev, unsigned int hHandle,
                                                unsigned int ui32TaskId)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_gtt_mm *mm = dev_priv->gtt_mm;
        struct psb_gtt *pg = dev_priv->pg;
        uint32_t pages, offset_pages;
        struct drm_mm_node *node;
        int ret;

        ret = psb_gtt_remove_node(mm, (u32)ui32TaskId, (u32)hHandle, &node);
        if (ret) {
                printk(KERN_ERR "remove node failed\n");
                return ret;
        }

        /*remove gtt entries*/
        offset_pages = node->start;
        pages = node->size;

        psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);

        /*free tt node*/
        psb_gtt_mm_free_mem(mm, node);
        return 0;
}