i915: Use struct_mutex to protect ring in GEM mode.
[pandora-kernel.git] drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include <linux/swap.h>
33
34 static int
35 i915_gem_object_set_domain(struct drm_gem_object *obj,
36                             uint32_t read_domains,
37                             uint32_t write_domain);
38 static int
39 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
40                                  uint64_t offset,
41                                  uint64_t size,
42                                  uint32_t read_domains,
43                                  uint32_t write_domain);
44 static int
45 i915_gem_set_domain(struct drm_gem_object *obj,
46                     struct drm_file *file_priv,
47                     uint32_t read_domains,
48                     uint32_t write_domain);
49 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
52
53 int
54 i915_gem_init_ioctl(struct drm_device *dev, void *data,
55                     struct drm_file *file_priv)
56 {
57         drm_i915_private_t *dev_priv = dev->dev_private;
58         struct drm_i915_gem_init *args = data;
59
60         mutex_lock(&dev->struct_mutex);
61
62         if (args->gtt_start >= args->gtt_end ||
63             (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
64             (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
65                 mutex_unlock(&dev->struct_mutex);
66                 return -EINVAL;
67         }
68
69         drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
70             args->gtt_end - args->gtt_start);
71
72         dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
73
74         mutex_unlock(&dev->struct_mutex);
75
76         return 0;
77 }
78
79
80 /**
81  * Creates a new mm object and returns a handle to it.
82  */
83 int
84 i915_gem_create_ioctl(struct drm_device *dev, void *data,
85                       struct drm_file *file_priv)
86 {
87         struct drm_i915_gem_create *args = data;
88         struct drm_gem_object *obj;
89         int handle, ret;
90
91         args->size = roundup(args->size, PAGE_SIZE);
92
93         /* Allocate the new object */
94         obj = drm_gem_object_alloc(dev, args->size);
95         if (obj == NULL)
96                 return -ENOMEM;
97
98         ret = drm_gem_handle_create(file_priv, obj, &handle);
99         mutex_lock(&dev->struct_mutex);
100         drm_gem_object_handle_unreference(obj);
101         mutex_unlock(&dev->struct_mutex);
102
103         if (ret)
104                 return ret;
105
106         args->handle = handle;
107
108         return 0;
109 }
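
/* Illustrative userspace sketch (not part of this driver): the ioctl above
 * is normally reached through libdrm.  The file descriptor "fd" and the
 * surrounding error handling are assumptions for the example; the UAPI
 * struct and ioctl number are real.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		printf("new GEM handle %u\n", create.handle);
 */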
110
111 /**
112  * Reads data from the object referenced by handle.
113  *
114  * On error, the contents of *data are undefined.
115  */
116 int
117 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
118                      struct drm_file *file_priv)
119 {
120         struct drm_i915_gem_pread *args = data;
121         struct drm_gem_object *obj;
122         struct drm_i915_gem_object *obj_priv;
123         ssize_t read;
124         loff_t offset;
125         int ret;
126
127         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
128         if (obj == NULL)
129                 return -EBADF;
130         obj_priv = obj->driver_private;
131
132         /* Bounds check source.
133          *
134          * XXX: This could use review for overflow issues...
135          */
136         if (args->offset > obj->size || args->size > obj->size ||
137             args->offset + args->size > obj->size) {
138                 drm_gem_object_unreference(obj);
139                 return -EINVAL;
140         }
141
142         mutex_lock(&dev->struct_mutex);
143
144         ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
145                                                I915_GEM_DOMAIN_CPU, 0);
146         if (ret != 0) {
147                 drm_gem_object_unreference(obj);
148                 mutex_unlock(&dev->struct_mutex);
                    return ret;
149         }
150
151         offset = args->offset;
152
153         read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
154                         args->size, &offset);
155         if (read != args->size) {
156                 drm_gem_object_unreference(obj);
157                 mutex_unlock(&dev->struct_mutex);
158                 if (read < 0)
159                         return read;
160                 else
161                         return -EINVAL;
162         }
163
164         drm_gem_object_unreference(obj);
165         mutex_unlock(&dev->struct_mutex);
166
167         return 0;
168 }
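
/* Note on the "XXX" overflow concern in the bounds check above: args->offset
 * and args->size are 64-bit values from userspace, so offset + size can wrap.
 * A sketch of an overflow-safe variant (illustrative only, not applied here):
 *
 *	if (args->size > obj->size ||
 *	    args->offset > obj->size - args->size)
 *		return -EINVAL;
 */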
169
170 static int
171 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
172                     struct drm_i915_gem_pwrite *args,
173                     struct drm_file *file_priv)
174 {
175         struct drm_i915_gem_object *obj_priv = obj->driver_private;
176         ssize_t remain;
177         loff_t offset;
178         char __user *user_data;
179         char *vaddr;
180         int i, o, l;
181         int ret = 0;
182         unsigned long pfn;
183         unsigned long unwritten;
184
185         user_data = (char __user *) (uintptr_t) args->data_ptr;
186         remain = args->size;
187         if (!access_ok(VERIFY_READ, user_data, remain))
188                 return -EFAULT;
189
190
191         mutex_lock(&dev->struct_mutex);
192         ret = i915_gem_object_pin(obj, 0);
193         if (ret) {
194                 mutex_unlock(&dev->struct_mutex);
195                 return ret;
196         }
197         ret = i915_gem_set_domain(obj, file_priv,
198                                   I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
199         if (ret)
200                 goto fail;
201
202         obj_priv = obj->driver_private;
203         offset = obj_priv->gtt_offset + args->offset;
204         obj_priv->dirty = 1;
205
206         while (remain > 0) {
207                 /* Operation in this page
208                  *
209                  * i = page number
210                  * o = offset within page
211                  * l = bytes to copy
212                  */
213                 i = offset >> PAGE_SHIFT;
214                 o = offset & (PAGE_SIZE-1);
215                 l = remain;
216                 if ((o + l) > PAGE_SIZE)
217                         l = PAGE_SIZE - o;
218
219                 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
220
221 #ifdef CONFIG_HIGHMEM
222                 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
223                  */
224                 vaddr = kmap_atomic_pfn(pfn, KM_USER0);
225 #if WATCH_PWRITE
226                 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
227                          i, o, l, pfn, vaddr);
228 #endif
229                 unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
230                                                               user_data, l);
231                 kunmap_atomic(vaddr, KM_USER0);
232
233                 if (unwritten)
234 #endif /* CONFIG_HIGHMEM */
235                 {
236                         vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
237 #if WATCH_PWRITE
238                         DRM_INFO("pwrite slow i %d o %d l %d "
239                                  "pfn %ld vaddr %p\n",
240                                  i, o, l, pfn, vaddr);
241 #endif
242                         if (vaddr == NULL) {
243                                 ret = -EFAULT;
244                                 goto fail;
245                         }
246                         unwritten = __copy_from_user(vaddr + o, user_data, l);
247 #if WATCH_PWRITE
248                         DRM_INFO("unwritten %ld\n", unwritten);
249 #endif
250                         iounmap(vaddr);
251                         if (unwritten) {
252                                 ret = -EFAULT;
253                                 goto fail;
254                         }
255                 }
256
257                 remain -= l;
258                 user_data += l;
259                 offset += l;
260         }
261 #if WATCH_PWRITE && 1
262         i915_gem_clflush_object(obj);
263         i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
264         i915_gem_clflush_object(obj);
265 #endif
266
267 fail:
268         i915_gem_object_unpin(obj);
269         mutex_unlock(&dev->struct_mutex);
270
271         return ret;
272 }
273
274 int
275 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
276                       struct drm_i915_gem_pwrite *args,
277                       struct drm_file *file_priv)
278 {
279         int ret;
280         loff_t offset;
281         ssize_t written;
282
283         mutex_lock(&dev->struct_mutex);
284
285         ret = i915_gem_set_domain(obj, file_priv,
286                                   I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
287         if (ret) {
288                 mutex_unlock(&dev->struct_mutex);
289                 return ret;
290         }
291
292         offset = args->offset;
293
294         written = vfs_write(obj->filp,
295                             (char __user *)(uintptr_t) args->data_ptr,
296                             args->size, &offset);
297         if (written != args->size) {
298                 mutex_unlock(&dev->struct_mutex);
299                 if (written < 0)
300                         return written;
301                 else
302                         return -EINVAL;
303         }
304
305         mutex_unlock(&dev->struct_mutex);
306
307         return 0;
308 }
309
310 /**
311  * Writes data to the object referenced by handle.
312  *
313  * On error, the contents of the buffer that were to be modified are undefined.
314  */
315 int
316 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
317                       struct drm_file *file_priv)
318 {
319         struct drm_i915_gem_pwrite *args = data;
320         struct drm_gem_object *obj;
321         struct drm_i915_gem_object *obj_priv;
322         int ret = 0;
323
324         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
325         if (obj == NULL)
326                 return -EBADF;
327         obj_priv = obj->driver_private;
328
329         /* Bounds check destination.
330          *
331          * XXX: This could use review for overflow issues...
332          */
333         if (args->offset > obj->size || args->size > obj->size ||
334             args->offset + args->size > obj->size) {
335                 drm_gem_object_unreference(obj);
336                 return -EINVAL;
337         }
338
339         /* We can only do the GTT pwrite on untiled buffers, as otherwise
340          * it would end up going through the fenced access, and we'll get
341          * different detiling behavior between reading and writing.
342          * pread/pwrite currently are reading and writing from the CPU
343          * perspective, requiring manual detiling by the client.
344          */
345         if (obj_priv->tiling_mode == I915_TILING_NONE &&
346             dev->gtt_total != 0)
347                 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
348         else
349                 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
350
351 #if WATCH_PWRITE
352         if (ret)
353                 DRM_INFO("pwrite failed %d\n", ret);
354 #endif
355
356         drm_gem_object_unreference(obj);
357
358         return ret;
359 }
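
/* Illustrative userspace sketch (not part of this driver): exercising the
 * pwrite ioctl above.  "fd", "handle" and "data" are assumptions for the
 * example; the UAPI struct and ioctl number are real.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(data),
 *		.data_ptr = (uintptr_t)data,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */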
360
361 /**
362  * Called when user space prepares to use an object
363  */
364 int
365 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
366                           struct drm_file *file_priv)
367 {
368         struct drm_i915_gem_set_domain *args = data;
369         struct drm_gem_object *obj;
370         int ret;
371
372         if (!(dev->driver->driver_features & DRIVER_GEM))
373                 return -ENODEV;
374
375         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
376         if (obj == NULL)
377                 return -EBADF;
378
379         mutex_lock(&dev->struct_mutex);
380 #if WATCH_BUF
381         DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
382                  obj, obj->size, args->read_domains, args->write_domain);
383 #endif
384         ret = i915_gem_set_domain(obj, file_priv,
385                                   args->read_domains, args->write_domain);
386         drm_gem_object_unreference(obj);
387         mutex_unlock(&dev->struct_mutex);
388         return ret;
389 }
390
391 /**
392  * Called when user space has done writes to this buffer
393  */
394 int
395 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
396                       struct drm_file *file_priv)
397 {
398         struct drm_i915_gem_sw_finish *args = data;
399         struct drm_gem_object *obj;
400         struct drm_i915_gem_object *obj_priv;
401         int ret = 0;
402
403         if (!(dev->driver->driver_features & DRIVER_GEM))
404                 return -ENODEV;
405
406         mutex_lock(&dev->struct_mutex);
407         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
408         if (obj == NULL) {
409                 mutex_unlock(&dev->struct_mutex);
410                 return -EBADF;
411         }
412
413 #if WATCH_BUF
414         DRM_INFO("%s: sw_finish %d (%p %d)\n",
415                  __func__, args->handle, obj, obj->size);
416 #endif
417         obj_priv = obj->driver_private;
418
419         /* Pinned buffers may be scanout, so flush the cache */
420         if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
421                 i915_gem_clflush_object(obj);
422                 drm_agp_chipset_flush(dev);
423         }
424         drm_gem_object_unreference(obj);
425         mutex_unlock(&dev->struct_mutex);
426         return ret;
427 }
428
429 /**
430  * Maps the contents of an object, returning the address it is mapped
431  * into.
432  *
433  * While the mapping holds a reference on the contents of the object, it doesn't
434  * imply a ref on the object itself.
435  */
436 int
437 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
438                    struct drm_file *file_priv)
439 {
440         struct drm_i915_gem_mmap *args = data;
441         struct drm_gem_object *obj;
442         loff_t offset;
443         unsigned long addr;
444
445         if (!(dev->driver->driver_features & DRIVER_GEM))
446                 return -ENODEV;
447
448         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
449         if (obj == NULL)
450                 return -EBADF;
451
452         offset = args->offset;
453
454         down_write(&current->mm->mmap_sem);
455         addr = do_mmap(obj->filp, 0, args->size,
456                        PROT_READ | PROT_WRITE, MAP_SHARED,
457                        args->offset);
458         up_write(&current->mm->mmap_sem);
459         mutex_lock(&dev->struct_mutex);
460         drm_gem_object_unreference(obj);
461         mutex_unlock(&dev->struct_mutex);
462         if (IS_ERR((void *)addr))
463                 return addr;
464
465         args->addr_ptr = (uint64_t) addr;
466
467         return 0;
468 }
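
/* Illustrative userspace sketch (not part of this driver): mapping an object
 * through the mmap ioctl above and writing to it.  "fd" and "handle" are
 * assumptions for the example; the UAPI struct and ioctl number are real.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size   = 4096,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		memset((void *)(uintptr_t)mmap_arg.addr_ptr, 0, 4096);
 */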
469
470 static void
471 i915_gem_object_free_page_list(struct drm_gem_object *obj)
472 {
473         struct drm_i915_gem_object *obj_priv = obj->driver_private;
474         int page_count = obj->size / PAGE_SIZE;
475         int i;
476
477         if (obj_priv->page_list == NULL)
478                 return;
479
480
481         for (i = 0; i < page_count; i++)
482                 if (obj_priv->page_list[i] != NULL) {
483                         if (obj_priv->dirty)
484                                 set_page_dirty(obj_priv->page_list[i]);
485                         mark_page_accessed(obj_priv->page_list[i]);
486                         page_cache_release(obj_priv->page_list[i]);
487                 }
488         obj_priv->dirty = 0;
489
490         drm_free(obj_priv->page_list,
491                  page_count * sizeof(struct page *),
492                  DRM_MEM_DRIVER);
493         obj_priv->page_list = NULL;
494 }
495
496 static void
497 i915_gem_object_move_to_active(struct drm_gem_object *obj)
498 {
499         struct drm_device *dev = obj->dev;
500         drm_i915_private_t *dev_priv = dev->dev_private;
501         struct drm_i915_gem_object *obj_priv = obj->driver_private;
502
503         /* Add a reference if we're newly entering the active list. */
504         if (!obj_priv->active) {
505                 drm_gem_object_reference(obj);
506                 obj_priv->active = 1;
507         }
508         /* Move from whatever list we were on to the tail of execution. */
509         list_move_tail(&obj_priv->list,
510                        &dev_priv->mm.active_list);
511 }
512
513
514 static void
515 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
516 {
517         struct drm_device *dev = obj->dev;
518         drm_i915_private_t *dev_priv = dev->dev_private;
519         struct drm_i915_gem_object *obj_priv = obj->driver_private;
520
521         i915_verify_inactive(dev, __FILE__, __LINE__);
522         if (obj_priv->pin_count != 0)
523                 list_del_init(&obj_priv->list);
524         else
525                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
526
527         if (obj_priv->active) {
528                 obj_priv->active = 0;
529                 drm_gem_object_unreference(obj);
530         }
531         i915_verify_inactive(dev, __FILE__, __LINE__);
532 }
533
534 /**
535  * Creates a new sequence number, emitting a write of it to the status page
536  * plus an interrupt, which will trigger i915_user_interrupt_handler.
537  *
538  * Must be called with struct_mutex held.
539  *
540  * Returned sequence numbers are nonzero on success.
541  */
542 static uint32_t
543 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
544 {
545         drm_i915_private_t *dev_priv = dev->dev_private;
546         struct drm_i915_gem_request *request;
547         uint32_t seqno;
548         int was_empty;
549         RING_LOCALS;
550
551         request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
552         if (request == NULL)
553                 return 0;
554
555         /* Grab the seqno we're going to make this request be, and bump the
556          * next (skipping 0 so it can be the reserved no-seqno value).
557          */
558         seqno = dev_priv->mm.next_gem_seqno;
559         dev_priv->mm.next_gem_seqno++;
560         if (dev_priv->mm.next_gem_seqno == 0)
561                 dev_priv->mm.next_gem_seqno++;
562
563         BEGIN_LP_RING(4);
564         OUT_RING(MI_STORE_DWORD_INDEX);
565         OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
566         OUT_RING(seqno);
567
568         OUT_RING(MI_USER_INTERRUPT);
569         ADVANCE_LP_RING();
570
571         DRM_DEBUG("%d\n", seqno);
572
573         request->seqno = seqno;
574         request->emitted_jiffies = jiffies;
575         request->flush_domains = flush_domains;
576         was_empty = list_empty(&dev_priv->mm.request_list);
577         list_add_tail(&request->list, &dev_priv->mm.request_list);
578
579         if (was_empty)
580                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
581         return seqno;
582 }
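
/* Typical pairing within this file (sketch, mirroring
 * i915_gem_object_wait_rendering() below): the returned seqno is stored on
 * the object and later handed to i915_wait_request().
 *
 *	seqno = i915_add_request(dev, write_domain);
 *	obj_priv->last_rendering_seqno = seqno;
 *	...
 *	ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
 */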
583
584 /**
585  * Command execution barrier
586  *
587  * Ensures that all commands in the ring are finished
588  * before signalling the CPU
589  */
590 uint32_t
591 i915_retire_commands(struct drm_device *dev)
592 {
593         drm_i915_private_t *dev_priv = dev->dev_private;
594         uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
595         uint32_t flush_domains = 0;
596         RING_LOCALS;
597
598         /* The sampler always gets flushed on i965 (sigh) */
599         if (IS_I965G(dev))
600                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
601         BEGIN_LP_RING(2);
602         OUT_RING(cmd);
603         OUT_RING(0); /* noop */
604         ADVANCE_LP_RING();
605         return flush_domains;
606 }
607
608 /**
609  * Moves buffers associated only with the given active seqno from the active
610  * to inactive list, potentially freeing them.
611  */
612 static void
613 i915_gem_retire_request(struct drm_device *dev,
614                         struct drm_i915_gem_request *request)
615 {
616         drm_i915_private_t *dev_priv = dev->dev_private;
617
618         /* Move any buffers on the active list that are no longer referenced
619          * by the ringbuffer to the flushing/inactive lists as appropriate.
620          */
621         while (!list_empty(&dev_priv->mm.active_list)) {
622                 struct drm_gem_object *obj;
623                 struct drm_i915_gem_object *obj_priv;
624
625                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
626                                             struct drm_i915_gem_object,
627                                             list);
628                 obj = obj_priv->obj;
629
630                 /* If the seqno being retired doesn't match the oldest in the
631                  * list, then the oldest in the list must still be newer than
632                  * this seqno.
633                  */
634                 if (obj_priv->last_rendering_seqno != request->seqno)
635                         return;
636 #if WATCH_LRU
637                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
638                          __func__, request->seqno, obj);
639 #endif
640
641                 if (obj->write_domain != 0) {
642                         list_move_tail(&obj_priv->list,
643                                        &dev_priv->mm.flushing_list);
644                 } else {
645                         i915_gem_object_move_to_inactive(obj);
646                 }
647         }
648
649         if (request->flush_domains != 0) {
650                 struct drm_i915_gem_object *obj_priv, *next;
651
652                 /* Clear the write domain and activity from any buffers
653                  * that are just waiting for a flush matching the one retired.
654                  */
655                 list_for_each_entry_safe(obj_priv, next,
656                                          &dev_priv->mm.flushing_list, list) {
657                         struct drm_gem_object *obj = obj_priv->obj;
658
659                         if (obj->write_domain & request->flush_domains) {
660                                 obj->write_domain = 0;
661                                 i915_gem_object_move_to_inactive(obj);
662                         }
663                 }
664
665         }
666 }
667
668 /**
669  * Returns true if seq1 is at or later than seq2 (wraparound-safe).
670  */
671 static int
672 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
673 {
674         return (int32_t)(seq1 - seq2) >= 0;
675 }
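
/* Worked example: the signed subtraction makes the comparison robust across
 * 32-bit wraparound.  With seq1 = 0x00000002 and seq2 = 0xfffffffe,
 * (int32_t)(seq1 - seq2) == 4 >= 0, so seq1 is correctly treated as the later
 * sequence number even though it is numerically smaller.
 */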
676
677 uint32_t
678 i915_get_gem_seqno(struct drm_device *dev)
679 {
680         drm_i915_private_t *dev_priv = dev->dev_private;
681
682         return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
683 }
684
685 /**
686  * This function clears the request list as sequence numbers are passed.
687  */
688 void
689 i915_gem_retire_requests(struct drm_device *dev)
690 {
691         drm_i915_private_t *dev_priv = dev->dev_private;
692         uint32_t seqno;
693
694         seqno = i915_get_gem_seqno(dev);
695
696         while (!list_empty(&dev_priv->mm.request_list)) {
697                 struct drm_i915_gem_request *request;
698                 uint32_t retiring_seqno;
699
700                 request = list_first_entry(&dev_priv->mm.request_list,
701                                            struct drm_i915_gem_request,
702                                            list);
703                 retiring_seqno = request->seqno;
704
705                 if (i915_seqno_passed(seqno, retiring_seqno) ||
706                     dev_priv->mm.wedged) {
707                         i915_gem_retire_request(dev, request);
708
709                         list_del(&request->list);
710                         drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
711                 } else
712                         break;
713         }
714 }
715
716 void
717 i915_gem_retire_work_handler(struct work_struct *work)
718 {
719         drm_i915_private_t *dev_priv;
720         struct drm_device *dev;
721
722         dev_priv = container_of(work, drm_i915_private_t,
723                                 mm.retire_work.work);
724         dev = dev_priv->dev;
725
726         mutex_lock(&dev->struct_mutex);
727         i915_gem_retire_requests(dev);
728         if (!list_empty(&dev_priv->mm.request_list))
729                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
730         mutex_unlock(&dev->struct_mutex);
731 }
732
733 /**
734  * Waits for a sequence number to be signaled, and cleans up the
735  * request and object lists appropriately for that event.
736  */
737 int
738 i915_wait_request(struct drm_device *dev, uint32_t seqno)
739 {
740         drm_i915_private_t *dev_priv = dev->dev_private;
741         int ret = 0;
742
743         BUG_ON(seqno == 0);
744
745         if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
746                 dev_priv->mm.waiting_gem_seqno = seqno;
747                 i915_user_irq_get(dev);
748                 ret = wait_event_interruptible(dev_priv->irq_queue,
749                                                i915_seqno_passed(i915_get_gem_seqno(dev),
750                                                                  seqno) ||
751                                                dev_priv->mm.wedged);
752                 i915_user_irq_put(dev);
753                 dev_priv->mm.waiting_gem_seqno = 0;
754         }
755         if (dev_priv->mm.wedged)
756                 ret = -EIO;
757
758         if (ret && ret != -ERESTARTSYS)
759                 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
760                           __func__, ret, seqno, i915_get_gem_seqno(dev));
761
762         /* Directly dispatch request retiring.  While we have the work queue
763          * to handle this, the waiter on a request often wants an associated
764          * buffer to have made it to the inactive list, and we would need
765          * a separate wait queue to handle that.
766          */
767         if (ret == 0)
768                 i915_gem_retire_requests(dev);
769
770         return ret;
771 }
772
773 static void
774 i915_gem_flush(struct drm_device *dev,
775                uint32_t invalidate_domains,
776                uint32_t flush_domains)
777 {
778         drm_i915_private_t *dev_priv = dev->dev_private;
779         uint32_t cmd;
780         RING_LOCALS;
781
782 #if WATCH_EXEC
783         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
784                   invalidate_domains, flush_domains);
785 #endif
786
787         if (flush_domains & I915_GEM_DOMAIN_CPU)
788                 drm_agp_chipset_flush(dev);
789
790         if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
791                                                      I915_GEM_DOMAIN_GTT)) {
792                 /*
793                  * read/write caches:
794                  *
795                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
796                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
797                  * also flushed at 2d versus 3d pipeline switches.
798                  *
799                  * read-only caches:
800                  *
801                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
802                  * MI_READ_FLUSH is set, and is always flushed on 965.
803                  *
804                  * I915_GEM_DOMAIN_COMMAND may not exist?
805                  *
806                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
807                  * invalidated when MI_EXE_FLUSH is set.
808                  *
809                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
810                  * invalidated with every MI_FLUSH.
811                  *
812                  * TLBs:
813                  *
814                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
815                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
816                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
817                  * are flushed at any MI_FLUSH.
818                  */
819
820                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
821                 if ((invalidate_domains|flush_domains) &
822                     I915_GEM_DOMAIN_RENDER)
823                         cmd &= ~MI_NO_WRITE_FLUSH;
824                 if (!IS_I965G(dev)) {
825                         /*
826                          * On the 965, the sampler cache always gets flushed
827                          * and this bit is reserved.
828                          */
829                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
830                                 cmd |= MI_READ_FLUSH;
831                 }
832                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
833                         cmd |= MI_EXE_FLUSH;
834
835 #if WATCH_EXEC
836                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
837 #endif
838                 BEGIN_LP_RING(2);
839                 OUT_RING(cmd);
840                 OUT_RING(0); /* noop */
841                 ADVANCE_LP_RING();
842         }
843 }
844
845 /**
846  * Ensures that all rendering to the object has completed and the object is
847  * safe to unbind from the GTT or access from the CPU.
848  */
849 static int
850 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
851 {
852         struct drm_device *dev = obj->dev;
853         struct drm_i915_gem_object *obj_priv = obj->driver_private;
854         int ret;
855
856         /* If there are writes queued to the buffer, flush and
857          * create a new seqno to wait for.
858          */
859         if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
860                 uint32_t write_domain = obj->write_domain;
861 #if WATCH_BUF
862                 DRM_INFO("%s: flushing object %p from write domain %08x\n",
863                           __func__, obj, write_domain);
864 #endif
865                 i915_gem_flush(dev, 0, write_domain);
866
867                 i915_gem_object_move_to_active(obj);
868                 obj_priv->last_rendering_seqno = i915_add_request(dev,
869                                                                   write_domain);
870                 BUG_ON(obj_priv->last_rendering_seqno == 0);
871 #if WATCH_LRU
872                 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
873 #endif
874         }
875
876         /* If there is rendering queued on the buffer being evicted, wait for
877          * it.
878          */
879         if (obj_priv->active) {
880 #if WATCH_BUF
881                 DRM_INFO("%s: object %p wait for seqno %08x\n",
882                           __func__, obj, obj_priv->last_rendering_seqno);
883 #endif
884                 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
885                 if (ret != 0)
886                         return ret;
887         }
888
889         return 0;
890 }
891
892 /**
893  * Unbinds an object from the GTT aperture.
894  */
895 static int
896 i915_gem_object_unbind(struct drm_gem_object *obj)
897 {
898         struct drm_device *dev = obj->dev;
899         struct drm_i915_gem_object *obj_priv = obj->driver_private;
900         int ret = 0;
901
902 #if WATCH_BUF
903         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
904         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
905 #endif
906         if (obj_priv->gtt_space == NULL)
907                 return 0;
908
909         if (obj_priv->pin_count != 0) {
910                 DRM_ERROR("Attempting to unbind pinned buffer\n");
911                 return -EINVAL;
912         }
913
914         /* Wait for any rendering to complete
915          */
916         ret = i915_gem_object_wait_rendering(obj);
917         if (ret) {
918                 DRM_ERROR("wait_rendering failed: %d\n", ret);
919                 return ret;
920         }
921
922         /* Move the object to the CPU domain to ensure that
923          * any possible CPU writes while it's not in the GTT
924          * are flushed when we go to remap it. This will
925          * also ensure that all pending GPU writes are finished
926          * before we unbind.
927          */
928         ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
929                                          I915_GEM_DOMAIN_CPU);
930         if (ret) {
931                 DRM_ERROR("set_domain failed: %d\n", ret);
932                 return ret;
933         }
934
935         if (obj_priv->agp_mem != NULL) {
936                 drm_unbind_agp(obj_priv->agp_mem);
937                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
938                 obj_priv->agp_mem = NULL;
939         }
940
941         BUG_ON(obj_priv->active);
942
943         i915_gem_object_free_page_list(obj);
944
945         if (obj_priv->gtt_space) {
946                 atomic_dec(&dev->gtt_count);
947                 atomic_sub(obj->size, &dev->gtt_memory);
948
949                 drm_mm_put_block(obj_priv->gtt_space);
950                 obj_priv->gtt_space = NULL;
951         }
952
953         /* Remove ourselves from the LRU list if present. */
954         if (!list_empty(&obj_priv->list))
955                 list_del_init(&obj_priv->list);
956
957         return 0;
958 }
959
960 static int
961 i915_gem_evict_something(struct drm_device *dev)
962 {
963         drm_i915_private_t *dev_priv = dev->dev_private;
964         struct drm_gem_object *obj;
965         struct drm_i915_gem_object *obj_priv;
966         int ret = 0;
967
968         for (;;) {
969                 /* If there's an inactive buffer available now, grab it
970                  * and be done.
971                  */
972                 if (!list_empty(&dev_priv->mm.inactive_list)) {
973                         obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
974                                                     struct drm_i915_gem_object,
975                                                     list);
976                         obj = obj_priv->obj;
977                         BUG_ON(obj_priv->pin_count != 0);
978 #if WATCH_LRU
979                         DRM_INFO("%s: evicting %p\n", __func__, obj);
980 #endif
981                         BUG_ON(obj_priv->active);
982
983                         /* Wait on the rendering and unbind the buffer. */
984                         ret = i915_gem_object_unbind(obj);
985                         break;
986                 }
987
988                 /* If we didn't get anything, but the ring is still processing
989                  * things, wait for one of those things to finish and hopefully
990                  * leave us a buffer to evict.
991                  */
992                 if (!list_empty(&dev_priv->mm.request_list)) {
993                         struct drm_i915_gem_request *request;
994
995                         request = list_first_entry(&dev_priv->mm.request_list,
996                                                    struct drm_i915_gem_request,
997                                                    list);
998
999                         ret = i915_wait_request(dev, request->seqno);
1000                         if (ret)
1001                                 break;
1002
1003                         /* if waiting caused an object to become inactive,
1004                          * then loop around and wait for it. Otherwise, we
1005                          * assume that waiting freed and unbound something,
1006                          * so there should now be some space in the GTT
1007                          */
1008                         if (!list_empty(&dev_priv->mm.inactive_list))
1009                                 continue;
1010                         break;
1011                 }
1012
1013                 /* If we didn't have anything on the request list but there
1014                  * are buffers awaiting a flush, emit one and try again.
1015                  * When we wait on it, those buffers waiting for that flush
1016                  * will get moved to inactive.
1017                  */
1018                 if (!list_empty(&dev_priv->mm.flushing_list)) {
1019                         obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1020                                                     struct drm_i915_gem_object,
1021                                                     list);
1022                         obj = obj_priv->obj;
1023
1024                         i915_gem_flush(dev,
1025                                        obj->write_domain,
1026                                        obj->write_domain);
1027                         i915_add_request(dev, obj->write_domain);
1028
1029                         obj = NULL;
1030                         continue;
1031                 }
1032
1033                 DRM_ERROR("inactive empty %d request empty %d "
1034                           "flushing empty %d\n",
1035                           list_empty(&dev_priv->mm.inactive_list),
1036                           list_empty(&dev_priv->mm.request_list),
1037                           list_empty(&dev_priv->mm.flushing_list));
1038                 /* If we didn't do any of the above, there's nothing to be done
1039                  * and we just can't fit it in.
1040                  */
1041                 return -ENOMEM;
1042         }
1043         return ret;
1044 }
1045
1046 static int
1047 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1048 {
1049         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1050         int page_count, i;
1051         struct address_space *mapping;
1052         struct inode *inode;
1053         struct page *page;
1054         int ret;
1055
1056         if (obj_priv->page_list)
1057                 return 0;
1058
1059         /* Get the list of pages out of our struct file.  They'll be pinned
1060          * at this point until we release them.
1061          */
1062         page_count = obj->size / PAGE_SIZE;
1063         BUG_ON(obj_priv->page_list != NULL);
1064         obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1065                                          DRM_MEM_DRIVER);
1066         if (obj_priv->page_list == NULL) {
1067                 DRM_ERROR("Failed to allocate page list\n");
1068                 return -ENOMEM;
1069         }
1070
1071         inode = obj->filp->f_path.dentry->d_inode;
1072         mapping = inode->i_mapping;
1073         for (i = 0; i < page_count; i++) {
1074                 page = read_mapping_page(mapping, i, NULL);
1075                 if (IS_ERR(page)) {
1076                         ret = PTR_ERR(page);
1077                         DRM_ERROR("read_mapping_page failed: %d\n", ret);
1078                         i915_gem_object_free_page_list(obj);
1079                         return ret;
1080                 }
1081                 obj_priv->page_list[i] = page;
1082         }
1083         return 0;
1084 }
1085
1086 /**
1087  * Finds free space in the GTT aperture and binds the object there.
1088  */
1089 static int
1090 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1091 {
1092         struct drm_device *dev = obj->dev;
1093         drm_i915_private_t *dev_priv = dev->dev_private;
1094         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1095         struct drm_mm_node *free_space;
1096         int page_count, ret;
1097
1098         if (alignment == 0)
1099                 alignment = PAGE_SIZE;
1100         if (alignment & (PAGE_SIZE - 1)) {
1101                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1102                 return -EINVAL;
1103         }
1104
1105  search_free:
1106         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1107                                         obj->size, alignment, 0);
1108         if (free_space != NULL) {
1109                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1110                                                        alignment);
1111                 if (obj_priv->gtt_space != NULL) {
1112                         obj_priv->gtt_space->private = obj;
1113                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
1114                 }
1115         }
1116         if (obj_priv->gtt_space == NULL) {
1117                 /* If the gtt is empty and we're still having trouble
1118                  * fitting our object in, we're out of memory.
1119                  */
1120 #if WATCH_LRU
1121                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1122 #endif
1123                 if (list_empty(&dev_priv->mm.inactive_list) &&
1124                     list_empty(&dev_priv->mm.flushing_list) &&
1125                     list_empty(&dev_priv->mm.active_list)) {
1126                         DRM_ERROR("GTT full, but LRU list empty\n");
1127                         return -ENOMEM;
1128                 }
1129
1130                 ret = i915_gem_evict_something(dev);
1131                 if (ret != 0) {
1132                         DRM_ERROR("Failed to evict a buffer %d\n", ret);
1133                         return ret;
1134                 }
1135                 goto search_free;
1136         }
1137
1138 #if WATCH_BUF
1139         DRM_INFO("Binding object of size %d at 0x%08x\n",
1140                  obj->size, obj_priv->gtt_offset);
1141 #endif
1142         ret = i915_gem_object_get_page_list(obj);
1143         if (ret) {
1144                 drm_mm_put_block(obj_priv->gtt_space);
1145                 obj_priv->gtt_space = NULL;
1146                 return ret;
1147         }
1148
1149         page_count = obj->size / PAGE_SIZE;
1150         /* Create an AGP memory structure pointing at our pages, and bind it
1151          * into the GTT.
1152          */
1153         obj_priv->agp_mem = drm_agp_bind_pages(dev,
1154                                                obj_priv->page_list,
1155                                                page_count,
1156                                                obj_priv->gtt_offset);
1157         if (obj_priv->agp_mem == NULL) {
1158                 i915_gem_object_free_page_list(obj);
1159                 drm_mm_put_block(obj_priv->gtt_space);
1160                 obj_priv->gtt_space = NULL;
1161                 return -ENOMEM;
1162         }
1163         atomic_inc(&dev->gtt_count);
1164         atomic_add(obj->size, &dev->gtt_memory);
1165
1166         /* Assert that the object is not currently in any GPU domain. As it
1167          * wasn't in the GTT, there shouldn't be any way it could have been in
1168          * a GPU cache
1169          */
1170         BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1171         BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1172
1173         return 0;
1174 }
1175
1176 void
1177 i915_gem_clflush_object(struct drm_gem_object *obj)
1178 {
1179         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1180
1181         /* If we don't have a page list set up, then we're not pinned
1182          * to GPU, and we can ignore the cache flush because it'll happen
1183          * again at bind time.
1184          */
1185         if (obj_priv->page_list == NULL)
1186                 return;
1187
1188         drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1189 }
1190
1191 /*
1192  * Set the next domain for the specified object. This
1193  * may not actually perform the necessary flushing/invalidating though,
1194  * as that may want to be batched with other set_domain operations
1195  *
1196  * This is (we hope) the only really tricky part of gem. The goal
1197  * is fairly simple -- track which caches hold bits of the object
1198  * and make sure they remain coherent. A few concrete examples may
1199  * help to explain how it works. For shorthand, we use the notation
1200  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1201  * a pair of read and write domain masks.
1202  *
1203  * Case 1: the batch buffer
1204  *
1205  *      1. Allocated
1206  *      2. Written by CPU
1207  *      3. Mapped to GTT
1208  *      4. Read by GPU
1209  *      5. Unmapped from GTT
1210  *      6. Freed
1211  *
1212  *      Let's take these a step at a time
1213  *
1214  *      1. Allocated
1215  *              Pages allocated from the kernel may still have
1216  *              cache contents, so we set them to (CPU, CPU) always.
1217  *      2. Written by CPU (using pwrite)
1218  *              The pwrite function calls set_domain (CPU, CPU) and
1219  *              this function does nothing (as nothing changes)
1220  *      3. Mapped to GTT
1221  *              This function asserts that the object is not
1222  *              currently in any GPU-based read or write domains
1223  *      4. Read by GPU
1224  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
1225  *              As write_domain is zero, this function adds in the
1226  *              current read domains (CPU+COMMAND, 0).
1227  *              flush_domains is set to CPU.
1228  *              invalidate_domains is set to COMMAND
1229  *              clflush is run to get data out of the CPU caches
1230  *              then i915_dev_set_domain calls i915_gem_flush to
1231  *              emit an MI_FLUSH and drm_agp_chipset_flush
1232  *      5. Unmapped from GTT
1233  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
1234  *              flush_domains and invalidate_domains end up both zero
1235  *              so no flushing/invalidating happens
1236  *      6. Freed
1237  *              yay, done
1238  *
1239  * Case 2: The shared render buffer
1240  *
1241  *      1. Allocated
1242  *      2. Mapped to GTT
1243  *      3. Read/written by GPU
1244  *      4. set_domain to (CPU,CPU)
1245  *      5. Read/written by CPU
1246  *      6. Read/written by GPU
1247  *
1248  *      1. Allocated
1249  *              Same as last example, (CPU, CPU)
1250  *      2. Mapped to GTT
1251  *              Nothing changes (assertions find that it is not in the GPU)
1252  *      3. Read/written by GPU
1253  *              execbuffer calls set_domain (RENDER, RENDER)
1254  *              flush_domains gets CPU
1255  *              invalidate_domains gets GPU
1256  *              clflush (obj)
1257  *              MI_FLUSH and drm_agp_chipset_flush
1258  *      4. set_domain (CPU, CPU)
1259  *              flush_domains gets GPU
1260  *              invalidate_domains gets CPU
1261  *              wait_rendering (obj) to make sure all drawing is complete.
1262  *              This will include an MI_FLUSH to get the data from GPU
1263  *              to memory
1264  *              clflush (obj) to invalidate the CPU cache
1265  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1266  *      5. Read/written by CPU
1267  *              cache lines are loaded and dirtied
1268  *      6. Read/written by GPU
1269  *              Same as last GPU access
1270  *
1271  * Case 3: The constant buffer
1272  *
1273  *      1. Allocated
1274  *      2. Written by CPU
1275  *      3. Read by GPU
1276  *      4. Updated (written) by CPU again
1277  *      5. Read by GPU
1278  *
1279  *      1. Allocated
1280  *              (CPU, CPU)
1281  *      2. Written by CPU
1282  *              (CPU, CPU)
1283  *      3. Read by GPU
1284  *              (CPU+RENDER, 0)
1285  *              flush_domains = CPU
1286  *              invalidate_domains = RENDER
1287  *              clflush (obj)
1288  *              MI_FLUSH
1289  *              drm_agp_chipset_flush
1290  *      4. Updated (written) by CPU again
1291  *              (CPU, CPU)
1292  *              flush_domains = 0 (no previous write domain)
1293  *              invalidate_domains = 0 (no new read domains)
1294  *      5. Read by GPU
1295  *              (CPU+RENDER, 0)
1296  *              flush_domains = CPU
1297  *              invalidate_domains = RENDER
1298  *              clflush (obj)
1299  *              MI_FLUSH
1300  *              drm_agp_chipset_flush
1301  */
1302 static int
1303 i915_gem_object_set_domain(struct drm_gem_object *obj,
1304                             uint32_t read_domains,
1305                             uint32_t write_domain)
1306 {
1307         struct drm_device               *dev = obj->dev;
1308         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1309         uint32_t                        invalidate_domains = 0;
1310         uint32_t                        flush_domains = 0;
1311         int                             ret;
1312
1313 #if WATCH_BUF
1314         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1315                  __func__, obj,
1316                  obj->read_domains, read_domains,
1317                  obj->write_domain, write_domain);
1318 #endif
1319         /*
1320          * If the object isn't moving to a new write domain,
1321          * let the object stay in multiple read domains
1322          */
1323         if (write_domain == 0)
1324                 read_domains |= obj->read_domains;
1325         else
1326                 obj_priv->dirty = 1;
1327
1328         /*
1329          * Flush the current write domain if
1330          * the new read domains don't match. Invalidate
1331          * any read domains which differ from the old
1332          * write domain
1333          */
1334         if (obj->write_domain && obj->write_domain != read_domains) {
1335                 flush_domains |= obj->write_domain;
1336                 invalidate_domains |= read_domains & ~obj->write_domain;
1337         }
1338         /*
1339          * Invalidate any read caches which may have
1340          * stale data. That is, any new read domains.
1341          */
1342         invalidate_domains |= read_domains & ~obj->read_domains;
1343         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1344 #if WATCH_BUF
1345                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1346                          __func__, flush_domains, invalidate_domains);
1347 #endif
1348                 /*
1349                  * If we're invalidating the CPU cache and flushing a GPU cache,
1350                  * then pause for rendering so that the GPU caches will be
1351                  * flushed before the CPU cache is invalidated
1352                  */
1353                 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1354                     (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1355                                        I915_GEM_DOMAIN_GTT))) {
1356                         ret = i915_gem_object_wait_rendering(obj);
1357                         if (ret)
1358                                 return ret;
1359                 }
1360                 i915_gem_clflush_object(obj);
1361         }
1362
1363         if ((write_domain | flush_domains) != 0)
1364                 obj->write_domain = write_domain;
1365
1366         /* If we're invalidating the CPU domain, clear the per-page CPU
1367          * domain list as well.
1368          */
1369         if (obj_priv->page_cpu_valid != NULL &&
1370             (write_domain != 0 ||
1371              read_domains & I915_GEM_DOMAIN_CPU)) {
1372                 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1373                          DRM_MEM_DRIVER);
1374                 obj_priv->page_cpu_valid = NULL;
1375         }
1376         obj->read_domains = read_domains;
1377
1378         dev->invalidate_domains |= invalidate_domains;
1379         dev->flush_domains |= flush_domains;
1380 #if WATCH_BUF
1381         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1382                  __func__,
1383                  obj->read_domains, obj->write_domain,
1384                  dev->invalidate_domains, dev->flush_domains);
1385 #endif
1386         return 0;
1387 }
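
/* Sketch of Case 1 from the comment above, expressed as the call sequence a
 * batch buffer sees (illustrative and simplified; the real callers go through
 * the set_domain wrappers in this file):
 *
 *	set_domain(obj, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);      pwrite
 *	set_domain(obj, I915_GEM_DOMAIN_COMMAND, 0);                    exec
 *	set_domain(obj, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);      unbind
 *
 * The accumulated dev->invalidate_domains / dev->flush_domains are then
 * emitted once by i915_gem_dev_set_domain().
 */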
1388
1389 /**
1390  * Set the read/write domain on a range of the object.
1391  *
1392  * Currently only implemented for CPU reads, otherwise drops to normal
1393  * i915_gem_object_set_domain().
1394  */
1395 static int
1396 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1397                                  uint64_t offset,
1398                                  uint64_t size,
1399                                  uint32_t read_domains,
1400                                  uint32_t write_domain)
1401 {
1402         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1403         int ret, i;
1404
1405         if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1406                 return 0;
1407
1408         if (read_domains != I915_GEM_DOMAIN_CPU ||
1409             write_domain != 0)
1410                 return i915_gem_object_set_domain(obj,
1411                                                   read_domains, write_domain);
1412
1413         /* Wait on any GPU rendering to the object to be flushed. */
1414         if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1415                 ret = i915_gem_object_wait_rendering(obj);
1416                 if (ret)
1417                         return ret;
1418         }
1419
1420         if (obj_priv->page_cpu_valid == NULL) {
1421                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1422                                                       DRM_MEM_DRIVER);
1423         }
1424
1425         /* Flush the cache on any pages that are still invalid from the CPU's
1426          * perspective.
1427          */
1428         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1429                 if (obj_priv->page_cpu_valid[i])
1430                         continue;
1431
1432                 drm_clflush_pages(obj_priv->page_list + i, 1);
1433
1434                 obj_priv->page_cpu_valid[i] = 1;
1435         }
1436
1437         return 0;
1438 }
1439
1440 /**
1441  * Once all of the objects have been set in the proper domain,
1442  * perform the necessary flush and invalidate operations.
1443  *
1444  * Returns the write domains flushed, for use in flush tracking.
1445  */
1446 static uint32_t
1447 i915_gem_dev_set_domain(struct drm_device *dev)
1448 {
1449         uint32_t flush_domains = dev->flush_domains;
1450
1451         /*
1452          * Now that all the buffers are synced to the proper domains,
1453          * flush and invalidate the collected domains
1454          */
1455         if (dev->invalidate_domains | dev->flush_domains) {
1456 #if WATCH_EXEC
1457                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1458                           __func__,
1459                          dev->invalidate_domains,
1460                          dev->flush_domains);
1461 #endif
1462                 i915_gem_flush(dev,
1463                                dev->invalidate_domains,
1464                                dev->flush_domains);
1465                 dev->invalidate_domains = 0;
1466                 dev->flush_domains = 0;
1467         }
1468
1469         return flush_domains;
1470 }
1471
1472 /**
1473  * Pin an object to the GTT and evaluate the relocations landing in it.
1474  */
1475 static int
1476 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1477                                  struct drm_file *file_priv,
1478                                  struct drm_i915_gem_exec_object *entry)
1479 {
1480         struct drm_device *dev = obj->dev;
1481         struct drm_i915_gem_relocation_entry reloc;
1482         struct drm_i915_gem_relocation_entry __user *relocs;
1483         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1484         int i, ret;
1485         uint32_t last_reloc_offset = -1;
1486         void *reloc_page = NULL;
1487
1488         /* Choose the GTT offset for our buffer and put it there. */
1489         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1490         if (ret)
1491                 return ret;
1492
1493         entry->offset = obj_priv->gtt_offset;
1494
1495         relocs = (struct drm_i915_gem_relocation_entry __user *)
1496                  (uintptr_t) entry->relocs_ptr;
1497         /* Apply the relocations, using the GTT aperture to avoid cache
1498          * flushing requirements.
1499          */
1500         for (i = 0; i < entry->relocation_count; i++) {
1501                 struct drm_gem_object *target_obj;
1502                 struct drm_i915_gem_object *target_obj_priv;
1503                 uint32_t reloc_val, reloc_offset, *reloc_entry;
1504                 int ret;
1505
1506                 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1507                 if (ret != 0) {
1508                         i915_gem_object_unpin(obj);
1509                         return -EFAULT;
1510                 }
1511
1512                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1513                                                    reloc.target_handle);
1514                 if (target_obj == NULL) {
1515                         i915_gem_object_unpin(obj);
1516                         return -EBADF;
1517                 }
1518                 target_obj_priv = target_obj->driver_private;
1519
1520                 /* The target buffer should have appeared before us in the
1521                  * exec_object list, so it should have a GTT space bound by now.
1522                  */
1523                 if (target_obj_priv->gtt_space == NULL) {
1524                         DRM_ERROR("No GTT space found for object %d\n",
1525                                   reloc.target_handle);
1526                         drm_gem_object_unreference(target_obj);
1527                         i915_gem_object_unpin(obj);
1528                         return -EINVAL;
1529                 }
1530
1531                 if (reloc.offset > obj->size - 4) {
1532                         DRM_ERROR("Relocation beyond object bounds: "
1533                                   "obj %p target %d offset %d size %d.\n",
1534                                   obj, reloc.target_handle,
1535                                   (int) reloc.offset, (int) obj->size);
1536                         drm_gem_object_unreference(target_obj);
1537                         i915_gem_object_unpin(obj);
1538                         return -EINVAL;
1539                 }
1540                 if (reloc.offset & 3) {
1541                         DRM_ERROR("Relocation not 4-byte aligned: "
1542                                   "obj %p target %d offset %d.\n",
1543                                   obj, reloc.target_handle,
1544                                   (int) reloc.offset);
1545                         drm_gem_object_unreference(target_obj);
1546                         i915_gem_object_unpin(obj);
1547                         return -EINVAL;
1548                 }
1549
1550                 if (reloc.write_domain && target_obj->pending_write_domain &&
1551                     reloc.write_domain != target_obj->pending_write_domain) {
1552                         DRM_ERROR("Write domain conflict: "
1553                                   "obj %p target %d offset %d "
1554                                   "new %08x old %08x\n",
1555                                   obj, reloc.target_handle,
1556                                   (int) reloc.offset,
1557                                   reloc.write_domain,
1558                                   target_obj->pending_write_domain);
1559                         drm_gem_object_unreference(target_obj);
1560                         i915_gem_object_unpin(obj);
1561                         return -EINVAL;
1562                 }
1563
1564 #if WATCH_RELOC
1565                 DRM_INFO("%s: obj %p offset %08x target %d "
1566                          "read %08x write %08x gtt %08x "
1567                          "presumed %08x delta %08x\n",
1568                          __func__,
1569                          obj,
1570                          (int) reloc.offset,
1571                          (int) reloc.target_handle,
1572                          (int) reloc.read_domains,
1573                          (int) reloc.write_domain,
1574                          (int) target_obj_priv->gtt_offset,
1575                          (int) reloc.presumed_offset,
1576                          reloc.delta);
1577 #endif
1578
1579                 target_obj->pending_read_domains |= reloc.read_domains;
1580                 target_obj->pending_write_domain |= reloc.write_domain;
1581
1582                 /* If the relocation already has the right value in it, no
1583                  * more work needs to be done.
1584                  */
1585                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1586                         drm_gem_object_unreference(target_obj);
1587                         continue;
1588                 }
1589
1590                 /* Now that we're going to actually write some data in,
1591                  * make sure that any rendering using this buffer's contents
1592                  * is completed.
1593                  */
1594                 ret = i915_gem_object_wait_rendering(obj);
                     if (ret != 0) {
                             drm_gem_object_unreference(target_obj);
                             i915_gem_object_unpin(obj);
                             return ret;
                     }
1595
1596                 /* As we're writing through the gtt, flush
1597                  * any CPU writes before we write the relocations
1598                  */
1599                 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1600                         i915_gem_clflush_object(obj);
1601                         drm_agp_chipset_flush(dev);
1602                         obj->write_domain = 0;
1603                 }
1604
1605                 /* Map the page containing the relocation we're going to
1606                  * perform.
1607                  */
1608                 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1609                 if (reloc_page == NULL ||
1610                     (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1611                     (reloc_offset & ~(PAGE_SIZE - 1))) {
1612                         if (reloc_page != NULL)
1613                                 iounmap(reloc_page);
1614
1615                         reloc_page = ioremap(dev->agp->base +
1616                                              (reloc_offset & ~(PAGE_SIZE - 1)),
1617                                              PAGE_SIZE);
1618                         last_reloc_offset = reloc_offset;
1619                         if (reloc_page == NULL) {
1620                                 drm_gem_object_unreference(target_obj);
1621                                 i915_gem_object_unpin(obj);
1622                                 return -ENOMEM;
1623                         }
1624                 }
1625
1626                 reloc_entry = (uint32_t *)((char *)reloc_page +
1627                                            (reloc_offset & (PAGE_SIZE - 1)));
1628                 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1629
1630 #if WATCH_BUF
1631                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1632                           obj, (unsigned int) reloc.offset,
1633                           readl(reloc_entry), reloc_val);
1634 #endif
1635                 writel(reloc_val, reloc_entry);
1636
1637                 /* Write the updated presumed offset for this entry back out
1638                  * to the user.
1639                  */
1640                 reloc.presumed_offset = target_obj_priv->gtt_offset;
1641                 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1642                 if (ret != 0) {
1643                         drm_gem_object_unreference(target_obj);
1644                         i915_gem_object_unpin(obj);
1645                         return -EFAULT;
1646                 }
1647
1648                 drm_gem_object_unreference(target_obj);
1649         }
1650
1651         if (reloc_page != NULL)
1652                 iounmap(reloc_page);
1653
1654 #if WATCH_BUF
1655         if (0)
1656                 i915_gem_dump_object(obj, 128, __func__, ~0);
1657 #endif
1658         return 0;
1659 }
1660
1661 /** Dispatch a batchbuffer to the ring. */
1663 static int
1664 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1665                               struct drm_i915_gem_execbuffer *exec,
1666                               uint64_t exec_offset)
1667 {
1668         drm_i915_private_t *dev_priv = dev->dev_private;
1669         struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1670                                              (uintptr_t) exec->cliprects_ptr;
1671         int nbox = exec->num_cliprects;
1672         int i = 0, count;
1673         uint32_t        exec_start, exec_len;
1674         RING_LOCALS;
1675
1676         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1677         exec_len = (uint32_t) exec->batch_len;
1678
1679         if ((exec_start | exec_len) & 0x7) {
1680                 DRM_ERROR("alignment\n");
1681                 return -EINVAL;
1682         }
1683
1684         if (!exec_start)
1685                 return -EINVAL;
1686
1687         count = nbox ? nbox : 1;
1688
1689         for (i = 0; i < count; i++) {
1690                 if (i < nbox) {
1691                         int ret = i915_emit_box(dev, boxes, i,
1692                                                 exec->DR1, exec->DR4);
1693                         if (ret)
1694                                 return ret;
1695                 }
1696
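                     /* I830 and 845G use the older MI_BATCH_BUFFER command,
                      * which takes explicit start and end addresses; later
                      * chips use MI_BATCH_BUFFER_START with only the start
                      * address.
                      */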
1697                 if (IS_I830(dev) || IS_845G(dev)) {
1698                         BEGIN_LP_RING(4);
1699                         OUT_RING(MI_BATCH_BUFFER);
1700                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1701                         OUT_RING(exec_start + exec_len - 4);
1702                         OUT_RING(0);
1703                         ADVANCE_LP_RING();
1704                 } else {
1705                         BEGIN_LP_RING(2);
1706                         if (IS_I965G(dev)) {
1707                                 OUT_RING(MI_BATCH_BUFFER_START |
1708                                          (2 << 6) |
1709                                          MI_BATCH_NON_SECURE_I965);
1710                                 OUT_RING(exec_start);
1711                         } else {
1712                                 OUT_RING(MI_BATCH_BUFFER_START |
1713                                          (2 << 6));
1714                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1715                         }
1716                         ADVANCE_LP_RING();
1717                 }
1718         }
1719
1720         /* XXX breadcrumb */
1721         return 0;
1722 }
1723
1724 /* Throttle our rendering by waiting until the ring has completed our requests
1725  * emitted over 20 msec ago.
1726  *
1727  * This should get us reasonable parallelism between CPU and GPU but also
1728  * relatively low latency when blocking on a particular request to finish.
1729  */
1730 static int
1731 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1732 {
1733         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1734         int ret = 0;
1735         uint32_t seqno;
1736
1737         mutex_lock(&dev->struct_mutex);
1738         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1739         i915_file_priv->mm.last_gem_throttle_seqno =
1740                 i915_file_priv->mm.last_gem_seqno;
1741         if (seqno)
1742                 ret = i915_wait_request(dev, seqno);
1743         mutex_unlock(&dev->struct_mutex);
1744         return ret;
1745 }
1746
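     /**
      * Execbuffer ioctl: copies in the user's exec list, pins and relocates
      * each object, resolves the pending read/write domains, dispatches the
      * batchbuffer (the last entry in the list), emits a request seqno so
      * completion can be tracked, and writes the updated offsets back out
      * to userspace.
      */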
1747 int
1748 i915_gem_execbuffer(struct drm_device *dev, void *data,
1749                     struct drm_file *file_priv)
1750 {
1751         drm_i915_private_t *dev_priv = dev->dev_private;
1752         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1753         struct drm_i915_gem_execbuffer *args = data;
1754         struct drm_i915_gem_exec_object *exec_list = NULL;
1755         struct drm_gem_object **object_list = NULL;
1756         struct drm_gem_object *batch_obj;
1757         int ret, i, pinned = 0;
1758         uint64_t exec_offset;
1759         uint32_t seqno, flush_domains;
1760
1761 #if WATCH_EXEC
1762         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1763                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1764 #endif
1765
1766         /* Copy in the exec list from userland */
1767         exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1768                                DRM_MEM_DRIVER);
1769         object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1770                                  DRM_MEM_DRIVER);
1771         if (exec_list == NULL || object_list == NULL) {
1772                 DRM_ERROR("Failed to allocate exec or object list "
1773                           "for %d buffers\n",
1774                           args->buffer_count);
1775                 ret = -ENOMEM;
1776                 goto pre_mutex_err;
1777         }
1778         ret = copy_from_user(exec_list,
1779                              (struct drm_i915_gem_exec_object __user *)
1780                              (uintptr_t) args->buffers_ptr,
1781                              sizeof(*exec_list) * args->buffer_count);
1782         if (ret != 0) {
1783                 DRM_ERROR("copy %d exec entries failed %d\n",
1784                           args->buffer_count, ret);
                     ret = -EFAULT;
1785                 goto pre_mutex_err;
1786         }
1787
1788         mutex_lock(&dev->struct_mutex);
1789
1790         i915_verify_inactive(dev, __FILE__, __LINE__);
1791
1792         if (dev_priv->mm.wedged) {
1793                 DRM_ERROR("Execbuf while wedged\n");
1794                 mutex_unlock(&dev->struct_mutex);
                     ret = -EIO;
1795                 goto pre_mutex_err;
1796         }
1797
1798         if (dev_priv->mm.suspended) {
1799                 DRM_ERROR("Execbuf while VT-switched.\n");
1800                 mutex_unlock(&dev->struct_mutex);
                     ret = -EBUSY;
1801                 goto pre_mutex_err;
1802         }
1803
1804         /* Zero the global flush/invalidate flags. These
1805          * will be modified as each object is bound to the
1806          * GTT.
1807          */
1808         dev->invalidate_domains = 0;
1809         dev->flush_domains = 0;
1810
1811         /* Look up object handles and perform the relocations */
1812         for (i = 0; i < args->buffer_count; i++) {
1813                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1814                                                        exec_list[i].handle);
1815                 if (object_list[i] == NULL) {
1816                         DRM_ERROR("Invalid object handle %d at index %d\n",
1817                                    exec_list[i].handle, i);
1818                         ret = -EBADF;
1819                         goto err;
1820                 }
1821
1822                 object_list[i]->pending_read_domains = 0;
1823                 object_list[i]->pending_write_domain = 0;
1824                 ret = i915_gem_object_pin_and_relocate(object_list[i],
1825                                                        file_priv,
1826                                                        &exec_list[i]);
1827                 if (ret) {
1828                         DRM_ERROR("object bind and relocate failed %d\n", ret);
1829                         goto err;
1830                 }
1831                 pinned = i + 1;
1832         }
1833
1834         /* Set the pending read domains for the batch buffer to COMMAND */
1835         batch_obj = object_list[args->buffer_count-1];
1836         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1837         batch_obj->pending_write_domain = 0;
1838
1839         i915_verify_inactive(dev, __FILE__, __LINE__);
1840
1841         for (i = 0; i < args->buffer_count; i++) {
1842                 struct drm_gem_object *obj = object_list[i];
1843                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1844
1845                 if (obj_priv->gtt_space == NULL) {
1846                         /* We evicted the buffer in the process of validating
1847                          * our set of buffers.  We could try to recover by
1848                          * kicking everything out and trying again from
1849                          * the start.
1850                          */
1851                         ret = -ENOMEM;
1852                         goto err;
1853                 }
1854
1855                 /* make sure all previous memory operations have passed */
1856                 ret = i915_gem_object_set_domain(obj,
1857                                                  obj->pending_read_domains,
1858                                                  obj->pending_write_domain);
1859                 if (ret)
1860                         goto err;
1861         }
1862
1863         i915_verify_inactive(dev, __FILE__, __LINE__);
1864
1865         /* Flush/invalidate caches and chipset buffer */
1866         flush_domains = i915_gem_dev_set_domain(dev);
1867
1868         i915_verify_inactive(dev, __FILE__, __LINE__);
1869
1870 #if WATCH_COHERENCY
1871         for (i = 0; i < args->buffer_count; i++) {
1872                 i915_gem_object_check_coherency(object_list[i],
1873                                                 exec_list[i].handle);
1874         }
1875 #endif
1876
1877         exec_offset = exec_list[args->buffer_count - 1].offset;
1878
1879 #if WATCH_EXEC
1880         i915_gem_dump_object(object_list[args->buffer_count - 1],
1881                               args->batch_len,
1882                               __func__,
1883                               ~0);
1884 #endif
1885
1886         (void)i915_add_request(dev, flush_domains);
1887
1888         /* Exec the batchbuffer */
1889         ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1890         if (ret) {
1891                 DRM_ERROR("dispatch failed %d\n", ret);
1892                 goto err;
1893         }
1894
1895         /*
1896          * Ensure that the commands in the batch buffer are
1897          * finished before the interrupt fires
1898          */
1899         flush_domains = i915_retire_commands(dev);
1900
1901         i915_verify_inactive(dev, __FILE__, __LINE__);
1902
1903         /*
1904          * Get a seqno representing the execution of the current buffer,
1905          * which we can wait on.  We would like to mitigate these interrupts,
1906          * likely by only creating seqnos occasionally (so that we have
1907          * *some* interrupts representing completion of buffers that we can
1908          * wait on when trying to clear up gtt space).
1909          */
1910         seqno = i915_add_request(dev, flush_domains);
1911         BUG_ON(seqno == 0);
1912         i915_file_priv->mm.last_gem_seqno = seqno;
1913         for (i = 0; i < args->buffer_count; i++) {
1914                 struct drm_gem_object *obj = object_list[i];
1915                 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1916
1917                 i915_gem_object_move_to_active(obj);
1918                 obj_priv->last_rendering_seqno = seqno;
1919 #if WATCH_LRU
1920                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1921 #endif
1922         }
1923 #if WATCH_LRU
1924         i915_dump_lru(dev, __func__);
1925 #endif
1926
1927         i915_verify_inactive(dev, __FILE__, __LINE__);
1928
1929         /* Copy the new buffer offsets back to the user's exec list. */
1930         ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
1931                            (uintptr_t) args->buffers_ptr,
1932                            exec_list,
1933                            sizeof(*exec_list) * args->buffer_count);
1934         if (ret) {
1935                 DRM_ERROR("failed to copy %d exec entries "
1936                           "back to user (%d)\n",
1937                           args->buffer_count, ret);
                     ret = -EFAULT;
             }
1938 err:
1939         if (object_list != NULL) {
1940                 for (i = 0; i < pinned; i++)
1941                         i915_gem_object_unpin(object_list[i]);
1942
1943                 for (i = 0; i < args->buffer_count; i++)
1944                         drm_gem_object_unreference(object_list[i]);
1945         }
1946         mutex_unlock(&dev->struct_mutex);
1947
1948 pre_mutex_err:
1949         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1950                  DRM_MEM_DRIVER);
1951         drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
1952                  DRM_MEM_DRIVER);
1953
1954         return ret;
1955 }
1956
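     /**
      * Binds the object into the GTT if necessary and increments its pin
      * count.  On the first pin the object is accounted in dev->pin_count
      * and dev->pin_memory and, if it is idle, dropped from the inactive
      * list so it is not considered for eviction.
      */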
1957 int
1958 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1959 {
1960         struct drm_device *dev = obj->dev;
1961         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1962         int ret;
1963
1964         i915_verify_inactive(dev, __FILE__, __LINE__);
1965         if (obj_priv->gtt_space == NULL) {
1966                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1967                 if (ret != 0) {
1968                         DRM_ERROR("Failure to bind: %d\n", ret);
1969                         return ret;
1970                 }
1971         }
1972         obj_priv->pin_count++;
1973
1974         /* If the object is not active and not pending a flush,
1975          * remove it from the inactive list
1976          */
1977         if (obj_priv->pin_count == 1) {
1978                 atomic_inc(&dev->pin_count);
1979                 atomic_add(obj->size, &dev->pin_memory);
1980                 if (!obj_priv->active &&
1981                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1982                                            I915_GEM_DOMAIN_GTT)) == 0 &&
1983                     !list_empty(&obj_priv->list))
1984                         list_del_init(&obj_priv->list);
1985         }
1986         i915_verify_inactive(dev, __FILE__, __LINE__);
1987
1988         return 0;
1989 }
1990
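     /**
      * Drops one pin reference.  When the count reaches zero the object is
      * put back on the inactive list (if it is idle) and the device-wide
      * pin accounting is updated.
      */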
1991 void
1992 i915_gem_object_unpin(struct drm_gem_object *obj)
1993 {
1994         struct drm_device *dev = obj->dev;
1995         drm_i915_private_t *dev_priv = dev->dev_private;
1996         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1997
1998         i915_verify_inactive(dev, __FILE__, __LINE__);
1999         obj_priv->pin_count--;
2000         BUG_ON(obj_priv->pin_count < 0);
2001         BUG_ON(obj_priv->gtt_space == NULL);
2002
2003         /* If the object is no longer pinned, and is
2004          * neither active nor being flushed, then stick it on
2005          * the inactive list
2006          */
2007         if (obj_priv->pin_count == 0) {
2008                 if (!obj_priv->active &&
2009                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2010                                            I915_GEM_DOMAIN_GTT)) == 0)
2011                         list_move_tail(&obj_priv->list,
2012                                        &dev_priv->mm.inactive_list);
2013                 atomic_dec(&dev->pin_count);
2014                 atomic_sub(obj->size, &dev->pin_memory);
2015         }
2016         i915_verify_inactive(dev, __FILE__, __LINE__);
2017 }
2018
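     /**
      * Pin ioctl: pins the object named by args->handle into the GTT and
      * returns its GTT offset to userspace.  The CPU cache is flushed first
      * since the X server does not manage domains for pinned buffers yet.
      */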
2019 int
2020 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2021                    struct drm_file *file_priv)
2022 {
2023         struct drm_i915_gem_pin *args = data;
2024         struct drm_gem_object *obj;
2025         struct drm_i915_gem_object *obj_priv;
2026         int ret;
2027
2028         mutex_lock(&dev->struct_mutex);
2029
2030         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2031         if (obj == NULL) {
2032                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2033                           args->handle);
2034                 mutex_unlock(&dev->struct_mutex);
2035                 return -EBADF;
2036         }
2037         obj_priv = obj->driver_private;
2038
2039         ret = i915_gem_object_pin(obj, args->alignment);
2040         if (ret != 0) {
2041                 drm_gem_object_unreference(obj);
2042                 mutex_unlock(&dev->struct_mutex);
2043                 return ret;
2044         }
2045
2046         /* XXX - flush the CPU caches for pinned objects
2047          * as the X server doesn't manage domains yet
2048          */
2049         if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2050                 i915_gem_clflush_object(obj);
2051                 drm_agp_chipset_flush(dev);
2052                 obj->write_domain = 0;
2053         }
2054         args->offset = obj_priv->gtt_offset;
2055         drm_gem_object_unreference(obj);
2056         mutex_unlock(&dev->struct_mutex);
2057
2058         return 0;
2059 }
2060
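     /** Unpin ioctl: drops one pin reference on the object named by args->handle. */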
2061 int
2062 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2063                      struct drm_file *file_priv)
2064 {
2065         struct drm_i915_gem_pin *args = data;
2066         struct drm_gem_object *obj;
2067
2068         mutex_lock(&dev->struct_mutex);
2069
2070         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2071         if (obj == NULL) {
2072                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2073                           args->handle);
2074                 mutex_unlock(&dev->struct_mutex);
2075                 return -EBADF;
2076         }
2077
2078         i915_gem_object_unpin(obj);
2079
2080         drm_gem_object_unreference(obj);
2081         mutex_unlock(&dev->struct_mutex);
2082         return 0;
2083 }
2084
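     /**
      * Busy ioctl: reports whether the object still has rendering
      * outstanding on the GPU (i.e. it is on the active list).
      */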
2085 int
2086 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2087                     struct drm_file *file_priv)
2088 {
2089         struct drm_i915_gem_busy *args = data;
2090         struct drm_gem_object *obj;
2091         struct drm_i915_gem_object *obj_priv;
2092
2093         mutex_lock(&dev->struct_mutex);
2094         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2095         if (obj == NULL) {
2096                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2097                           args->handle);
2098                 mutex_unlock(&dev->struct_mutex);
2099                 return -EBADF;
2100         }
2101
2102         obj_priv = obj->driver_private;
2103         args->busy = obj_priv->active;
2104
2105         drm_gem_object_unreference(obj);
2106         mutex_unlock(&dev->struct_mutex);
2107         return 0;
2108 }
2109
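     /**
      * Throttle ioctl: blocks until the requests this client had emitted at
      * its previous throttle call have completed (see
      * i915_gem_ring_throttle() above).
      */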
2110 int
2111 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2112                         struct drm_file *file_priv)
2113 {
2114         return i915_gem_ring_throttle(dev, file_priv);
2115 }
2116
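     /**
      * Per-object initialization hook called by the GEM core: allocates the
      * driver-private tracking structure and starts the object out in the
      * CPU read/write domain.
      */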
2117 int i915_gem_init_object(struct drm_gem_object *obj)
2118 {
2119         struct drm_i915_gem_object *obj_priv;
2120
2121         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2122         if (obj_priv == NULL)
2123                 return -ENOMEM;
2124
2125         /*
2126          * We've just allocated pages from the kernel,
2127          * so they've just been written by the CPU with
2128          * zeros. They'll need to be clflushed before we
2129          * use them with the GPU.
2130          */
2131         obj->write_domain = I915_GEM_DOMAIN_CPU;
2132         obj->read_domains = I915_GEM_DOMAIN_CPU;
2133
2134         obj->driver_private = obj_priv;
2135         obj_priv->obj = obj;
2136         INIT_LIST_HEAD(&obj_priv->list);
2137         return 0;
2138 }
2139
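     /**
      * Tears down a GEM object: drops any remaining pins, unbinds it from
      * the GTT and frees the driver-private state.
      */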
2140 void i915_gem_free_object(struct drm_gem_object *obj)
2141 {
2142         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2143
2144         while (obj_priv->pin_count > 0)
2145                 i915_gem_object_unpin(obj);
2146
2147         i915_gem_object_unbind(obj);
2148
2149         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2150         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2151 }
2152
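     /**
      * Moves a single object into the given read/write domains and then
      * performs the device-wide flush/invalidate that the move accumulated,
      * emitting a request if any non-CPU write domain was flushed.
      */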
2153 static int
2154 i915_gem_set_domain(struct drm_gem_object *obj,
2155                     struct drm_file *file_priv,
2156                     uint32_t read_domains,
2157                     uint32_t write_domain)
2158 {
2159         struct drm_device *dev = obj->dev;
2160         int ret;
2161         uint32_t flush_domains;
2162
2163         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2164
2165         ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2166         if (ret)
2167                 return ret;
2168         flush_domains = i915_gem_dev_set_domain(obj->dev);
2169
2170         if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2171                 (void) i915_add_request(dev, flush_domains);
2172
2173         return 0;
2174 }
2175
2176 /** Unbinds all objects that are on the given buffer list. */
2177 static int
2178 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2179 {
2180         struct drm_gem_object *obj;
2181         struct drm_i915_gem_object *obj_priv;
2182         int ret;
2183
2184         while (!list_empty(head)) {
2185                 obj_priv = list_first_entry(head,
2186                                             struct drm_i915_gem_object,
2187                                             list);
2188                 obj = obj_priv->obj;
2189
2190                 if (obj_priv->pin_count != 0) {
2191                         DRM_ERROR("Pinned object in unbind list\n");
2192                         mutex_unlock(&dev->struct_mutex);
2193                         return -EINVAL;
2194                 }
2195
2196                 ret = i915_gem_object_unbind(obj);
2197                 if (ret != 0) {
2198                         DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2199                                   ret);
2200                         mutex_unlock(&dev->struct_mutex);
2201                         return ret;
2202                 }
2203         }
2204
2206         return 0;
2207 }
2208
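     /**
      * Quiesces the GPU: flushes all outstanding non-CPU write domains,
      * waits for the resulting request to complete (flagging the hardware
      * as wedged if the ring stops advancing), retires requests and evicts
      * every inactive buffer from the GTT.
      */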
2209 static int
2210 i915_gem_idle(struct drm_device *dev)
2211 {
2212         drm_i915_private_t *dev_priv = dev->dev_private;
2213         uint32_t seqno, cur_seqno, last_seqno;
2214         int stuck, ret;
2215
2216         if (dev_priv->mm.suspended)
2217                 return 0;
2218
2219         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
2220          * We need to replace this with a semaphore, or something.
2221          */
2222         dev_priv->mm.suspended = 1;
2223
2224         i915_kernel_lost_context(dev);
2225
2226         /* Flush the GPU along with all non-CPU write domains
2227          */
2228         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2229                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2230         seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2231                                         I915_GEM_DOMAIN_GTT));
2232
2233         if (seqno == 0) {
2234                 mutex_unlock(&dev->struct_mutex);
2235                 return -ENOMEM;
2236         }
2237
2238         dev_priv->mm.waiting_gem_seqno = seqno;
2239         last_seqno = 0;
2240         stuck = 0;
2241         for (;;) {
2242                 cur_seqno = i915_get_gem_seqno(dev);
2243                 if (i915_seqno_passed(cur_seqno, seqno))
2244                         break;
2245                 if (last_seqno == cur_seqno) {
2246                         if (stuck++ > 100) {
2247                                 DRM_ERROR("hardware wedged\n");
2248                                 dev_priv->mm.wedged = 1;
2249                                 DRM_WAKEUP(&dev_priv->irq_queue);
2250                                 break;
2251                         }
2252                 }
2253                 msleep(10);
2254                 last_seqno = cur_seqno;
2255         }
2256         dev_priv->mm.waiting_gem_seqno = 0;
2257
2258         i915_gem_retire_requests(dev);
2259
2260         /* Active and flushing should now be empty as we've
2261          * waited for a sequence higher than any pending execbuffer
2262          */
2263         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2264         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2265
2266         /* The request list should now be empty as we've also waited
2267          * for the last request in the list.
2268          */
2269         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2270
2271         /* Move all buffers out of the GTT. */
2272         ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2273         if (ret)
2274                 return ret;
2275
2276         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2277         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2278         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2279         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2280         return 0;
2281 }
2282
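     /**
      * Allocates and pins a GEM object for the hardware status page and
      * points HWS_PGA at it.  Only needed on chips that keep the status
      * page in graphics memory (I915_NEED_GFX_HWS); otherwise the
      * physical-address status page set up at load time is used.
      */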
2283 static int
2284 i915_gem_init_hws(struct drm_device *dev)
2285 {
2286         drm_i915_private_t *dev_priv = dev->dev_private;
2287         struct drm_gem_object *obj;
2288         struct drm_i915_gem_object *obj_priv;
2289         int ret;
2290
2291         /* If we need a physical address for the status page, it's already
2292          * initialized at driver load time.
2293          */
2294         if (!I915_NEED_GFX_HWS(dev))
2295                 return 0;
2296
2297         obj = drm_gem_object_alloc(dev, 4096);
2298         if (obj == NULL) {
2299                 DRM_ERROR("Failed to allocate status page\n");
2300                 return -ENOMEM;
2301         }
2302         obj_priv = obj->driver_private;
2303
2304         ret = i915_gem_object_pin(obj, 4096);
2305         if (ret != 0) {
2306                 drm_gem_object_unreference(obj);
2307                 return ret;
2308         }
2309
2310         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2311         dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
2312         dev_priv->hws_map.size = 4096;
2313         dev_priv->hws_map.type = 0;
2314         dev_priv->hws_map.flags = 0;
2315         dev_priv->hws_map.mtrr = 0;
2316
2317         drm_core_ioremap(&dev_priv->hws_map, dev);
2318         if (dev_priv->hws_map.handle == NULL) {
2319                 DRM_ERROR("Failed to map status page.\n");
2320                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2321                 drm_gem_object_unreference(obj);
2322                 return -EINVAL;
2323         }
2324         dev_priv->hws_obj = obj;
2325         dev_priv->hw_status_page = dev_priv->hws_map.handle;
2326         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2327         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2328         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2329
2330         return 0;
2331 }
2332
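     /**
      * Sets up the hardware status page, then allocates, pins and maps a
      * 128KB GEM object for the ring buffer and programs the PRB0 registers
      * to run the ring from the object's GTT offset.
      */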
2333 static int
2334 i915_gem_init_ringbuffer(struct drm_device *dev)
2335 {
2336         drm_i915_private_t *dev_priv = dev->dev_private;
2337         struct drm_gem_object *obj;
2338         struct drm_i915_gem_object *obj_priv;
2339         int ret;
2340
2341         ret = i915_gem_init_hws(dev);
2342         if (ret != 0)
2343                 return ret;
2344
2345         obj = drm_gem_object_alloc(dev, 128 * 1024);
2346         if (obj == NULL) {
2347                 DRM_ERROR("Failed to allocate ringbuffer\n");
2348                 return -ENOMEM;
2349         }
2350         obj_priv = obj->driver_private;
2351
2352         ret = i915_gem_object_pin(obj, 4096);
2353         if (ret != 0) {
2354                 drm_gem_object_unreference(obj);
2355                 return ret;
2356         }
2357
2358         /* Set up the kernel mapping for the ring. */
2359         dev_priv->ring.Size = obj->size;
2360         dev_priv->ring.tail_mask = obj->size - 1;
2361
2362         dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2363         dev_priv->ring.map.size = obj->size;
2364         dev_priv->ring.map.type = 0;
2365         dev_priv->ring.map.flags = 0;
2366         dev_priv->ring.map.mtrr = 0;
2367
2368         drm_core_ioremap(&dev_priv->ring.map, dev);
2369         if (dev_priv->ring.map.handle == NULL) {
2370                 DRM_ERROR("Failed to map ringbuffer.\n");
2371                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2372                 drm_gem_object_unreference(obj);
2373                 return -EINVAL;
2374         }
2375         dev_priv->ring.ring_obj = obj;
2376         dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2377
2378         /* Stop the ring if it's running. */
2379         I915_WRITE(PRB0_CTL, 0);
2380         I915_WRITE(PRB0_HEAD, 0);
2381         I915_WRITE(PRB0_TAIL, 0);
2382         I915_WRITE(PRB0_START, 0);
2383
2384         /* Initialize the ring. */
2385         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
2386         I915_WRITE(PRB0_CTL,
2387                    ((obj->size - 4096) & RING_NR_PAGES) |
2388                    RING_NO_REPORT |
2389                    RING_VALID);
2390
2391         /* Update our cache of the ring state */
2392         i915_kernel_lost_context(dev);
2393
2394         return 0;
2395 }
2396
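     /**
      * Undoes i915_gem_init_ringbuffer(): unmaps and releases the ring
      * object and, if present, the hardware status page object.
      */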
2397 static void
2398 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2399 {
2400         drm_i915_private_t *dev_priv = dev->dev_private;
2401
2402         if (dev_priv->ring.ring_obj == NULL)
2403                 return;
2404
2405         drm_core_ioremapfree(&dev_priv->ring.map, dev);
2406
2407         i915_gem_object_unpin(dev_priv->ring.ring_obj);
2408         drm_gem_object_unreference(dev_priv->ring.ring_obj);
2409         dev_priv->ring.ring_obj = NULL;
2410         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2411
2412         if (dev_priv->hws_obj != NULL) {
2413                 i915_gem_object_unpin(dev_priv->hws_obj);
2414                 drm_gem_object_unreference(dev_priv->hws_obj);
2415                 dev_priv->hws_obj = NULL;
2416                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2417
2418                 /* Write high address into HWS_PGA when disabling. */
2419                 I915_WRITE(HWS_PGA, 0x1ffff000);
2420         }
2421 }
2422
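     /**
      * Called when the DRM master re-enters its VT: reinitializes the ring
      * buffer (and status page) and clears the suspended flag so
      * execbuffers may run again.
      */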
2423 int
2424 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2425                        struct drm_file *file_priv)
2426 {
2427         drm_i915_private_t *dev_priv = dev->dev_private;
2428         int ret;
2429
2430         if (dev_priv->mm.wedged) {
2431                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2432                 dev_priv->mm.wedged = 0;
2433         }
2434
2435         ret = i915_gem_init_ringbuffer(dev);
2436         if (ret != 0)
2437                 return ret;
2438
2439         mutex_lock(&dev->struct_mutex);
2440         BUG_ON(!list_empty(&dev_priv->mm.active_list));
2441         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2442         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2443         BUG_ON(!list_empty(&dev_priv->mm.request_list));
2444         dev_priv->mm.suspended = 0;
2445         mutex_unlock(&dev->struct_mutex);
2446         return 0;
2447 }
2448
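     /**
      * Called when the DRM master leaves its VT: idles the GPU and, on
      * success, tears down the ring buffer.
      */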
2449 int
2450 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2451                        struct drm_file *file_priv)
2452 {
2453         int ret;
2454
2455         mutex_lock(&dev->struct_mutex);
2456         ret = i915_gem_idle(dev);
2457         if (ret == 0)
2458                 i915_gem_cleanup_ringbuffer(dev);
2459         mutex_unlock(&dev->struct_mutex);
2460
2461         return ret;
2462 }
2463
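     /**
      * On last close of the device, idle the GPU and tear down the ring
      * buffer if GEM initialized it.
      */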
2464 void
2465 i915_gem_lastclose(struct drm_device *dev)
2466 {
2467         int ret;
2468         drm_i915_private_t *dev_priv = dev->dev_private;
2469
2470         mutex_lock(&dev->struct_mutex);
2471
2472         if (dev_priv->ring.ring_obj != NULL) {
2473                 ret = i915_gem_idle(dev);
2474                 if (ret)
2475                         DRM_ERROR("failed to idle hardware: %d\n", ret);
2476
2477                 i915_gem_cleanup_ringbuffer(dev);
2478         }
2479
2480         mutex_unlock(&dev->struct_mutex);
2481 }
2482
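     /**
      * One-time GEM setup at driver load: initializes the memory-manager
      * lists, the retirement and vblank work handlers, the initial seqno
      * and the bit-6 swizzling detection.
      */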
2483 void
2484 i915_gem_load(struct drm_device *dev)
2485 {
2486         drm_i915_private_t *dev_priv = dev->dev_private;
2487
2488         INIT_LIST_HEAD(&dev_priv->mm.active_list);
2489         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2490         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2491         INIT_LIST_HEAD(&dev_priv->mm.request_list);
2492         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2493                           i915_gem_retire_work_handler);
2494         INIT_WORK(&dev_priv->mm.vblank_work,
2495                   i915_gem_vblank_work_handler);
2496         dev_priv->mm.next_gem_seqno = 1;
2497
2498         i915_gem_detect_bit_6_swizzle(dev);
2499 }