/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
        if (du->cursor_surface)
                vmw_surface_unreference(&du->cursor_surface);
        if (du->cursor_dmabuf)
                vmw_dmabuf_unreference(&du->cursor_dmabuf);
        drm_crtc_cleanup(&du->crtc);
        drm_encoder_cleanup(&du->encoder);
        drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
                            u32 *image, u32 width, u32 height,
                            u32 hotspotX, u32 hotspotY)
{
        struct {
                u32 cmd;
                SVGAFifoCmdDefineAlphaCursor cursor;
        } *cmd;
        u32 image_size = width * height * 4;
        u32 cmd_size = sizeof(*cmd) + image_size;

        if (!image)
                return -EINVAL;

        cmd = vmw_fifo_reserve(dev_priv, cmd_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, sizeof(*cmd));

        memcpy(&cmd[1], image, image_size);

        cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
        cmd->cursor.id = cpu_to_le32(0);
        cmd->cursor.width = cpu_to_le32(width);
        cmd->cursor.height = cpu_to_le32(height);
        cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
        cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

        vmw_fifo_commit(dev_priv, cmd_size);

        return 0;
}
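/*
 * Write the cursor state directly to the dedicated FIFO cursor registers
 * and bump SVGA_FIFO_CURSOR_COUNT so the device picks up the change.
 */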
void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t count;

        iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
        iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
        iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
        count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
        iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
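/*
 * DRM cursor_set callback: look up the new cursor image, which may be
 * backed by either a surface or a dmabuf, tear down the old one and
 * upload the new 64x64 image before re-showing the cursor.
 */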
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                           uint32_t handle, uint32_t width, uint32_t height)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *dmabuf = NULL;
        int ret;

        if (handle) {
                ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                                     handle, &surface);
                if (!ret) {
                        if (!surface->snooper.image) {
                                DRM_ERROR("surface not suitable for cursor\n");
                                return -EINVAL;
                        }
                } else {
                        ret = vmw_user_dmabuf_lookup(tfile,
                                                     handle, &dmabuf);
                        if (ret) {
                                DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
                                return -EINVAL;
                        }
                }
        }

        /* takedown old cursor */
        if (du->cursor_surface) {
                du->cursor_surface->snooper.crtc = NULL;
                vmw_surface_unreference(&du->cursor_surface);
        }
        if (du->cursor_dmabuf)
                vmw_dmabuf_unreference(&du->cursor_dmabuf);

        /* setup new image */
        if (surface) {
                /* vmw_user_surface_lookup takes one reference */
                du->cursor_surface = surface;

                du->cursor_surface->snooper.crtc = crtc;
                du->cursor_age = du->cursor_surface->snooper.age;
                vmw_cursor_update_image(dev_priv, surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        } else if (dmabuf) {
                struct ttm_bo_kmap_obj map;
                unsigned long kmap_offset;
                unsigned long kmap_num;
                void *virtual;
                bool dummy;

                /* vmw_user_dmabuf_lookup takes one reference */
                du->cursor_dmabuf = dmabuf;

                kmap_offset = 0;
                kmap_num = (64*64*4) >> PAGE_SHIFT;

                ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("reserve failed\n");
                        return -EINVAL;
                }

                ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
                if (unlikely(ret != 0))
                        goto err_unreserve;

                virtual = ttm_kmap_obj_virtual(&map, &dummy);
                vmw_cursor_update_image(dev_priv, virtual, 64, 64,
                                        du->hotspot_x, du->hotspot_y);

                ttm_bo_kunmap(&map);
err_unreserve:
                ttm_bo_unreserve(&dmabuf->base);

        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return 0;
        }

        vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

        return 0;
}
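/*
 * DRM cursor_move callback: track the new position relative to the CRTC
 * origin and push it to the device, hiding the cursor if no image is set.
 */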
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
        bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

        du->cursor_x = x + crtc->x;
        du->cursor_y = y + crtc->y;

        vmw_cursor_update_position(dev_priv, shown,
                                   du->cursor_x, du->cursor_y);

        return 0;
}
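/*
 * Snoop SVGA3D_CMD_SURFACE_DMA uploads that target a cursor surface and
 * keep a copy of the 64x64 image in srf->snooper.image, so the real
 * cursor can be updated once the execbuf has finished.
 */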
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_num;
        SVGA3dCopyBox *box;
        unsigned box_count;
        void *virtual;
        bool dummy;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);

        /* No snooper installed */
        if (!srf->snooper.image)
                return;

        if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
                DRM_ERROR("face and mipmap for cursors should never != 0\n");
                return;
        }

        if (cmd->header.size < 64) {
                DRM_ERROR("at least one full copy box must be given\n");
                return;
        }

        box = (SVGA3dCopyBox *)&cmd[1];
        box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
                        sizeof(SVGA3dCopyBox);

        if (cmd->dma.guest.pitch != (64 * 4) ||
            cmd->dma.guest.ptr.offset % PAGE_SIZE ||
            box->x != 0    || box->y != 0    || box->z != 0    ||
            box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
            box->w != 64   || box->h != 64   || box->d != 1    ||
            box_count != 1) {
                /* TODO handle non-page-aligned offsets */
                /* TODO handle partial uploads and pitch != 256 */
                /* TODO handle more than one copy box */
                DRM_ERROR("Can't snoop this cursor DMA layout.\n");
                return;
        }

        kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
        kmap_num = (64*64*4) >> PAGE_SHIFT;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0))
                goto err_unreserve;

        virtual = ttm_kmap_obj_virtual(&map, &dummy);

        memcpy(srf->snooper.image, virtual, 64*64*4);
        srf->snooper.age++;

        /* we can't call this function from this function since execbuf has
         * reserved fifo space.
         *
         * if (srf->snooper.crtc)
         *      vmw_ldu_crtc_cursor_update_image(dev_priv,
         *                                       srf->snooper.image, 64, 64,
         *                                       du->hotspot_x, du->hotspot_y);
         */

        ttm_bo_kunmap(&map);
err_unreserve:
        ttm_bo_unreserve(bo);
}
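/*
 * Replay any snooped cursor images once the execbuf that produced them
 * has committed its FIFO space, updating every CRTC whose cursor has aged.
 */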
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;

        mutex_lock(&dev->mode_config.mutex);

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                du = vmw_crtc_to_du(crtc);
                if (!du->cursor_surface ||
                    du->cursor_age == du->cursor_surface->snooper.age)
                        continue;

                du->cursor_age = du->cursor_surface->snooper.age;
                vmw_cursor_update_image(dev_priv,
                                        du->cursor_surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        }

        mutex_unlock(&dev->mode_config.mutex);
}
/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
                                  struct drm_file *file_priv,
                                  unsigned int *handle)
{
        if (handle)
                *handle = 0;

        return 0;
}
/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
        container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
        struct vmw_framebuffer base;
        struct vmw_surface *surface;
        struct vmw_dma_buffer *buffer;
        struct delayed_work d_work;
        struct mutex work_lock;
        bool present_fs;
        struct list_head head;
        struct drm_master *master;
};
/**
 * vmw_kms_idle_workqueues - Flush workqueues on this master
 *
 * @vmaster - Pointer identifying the master, for the surfaces of which
 * we idle the dirty work queues.
 *
 * This function should be called with the ttm lock held in exclusive mode
 * to idle all dirty work queues before the fifo is taken down.
 *
 * The work task may actually requeue itself, but after the flush returns we're
 * sure that there's nothing to present, since the ttm lock is held in
 * exclusive mode, so the fifo will never get used.
 */
void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
{
        struct vmw_framebuffer_surface *entry;

        mutex_lock(&vmaster->fb_surf_mutex);
        list_for_each_entry(entry, &vmaster->fb_surf, head) {
                if (cancel_delayed_work_sync(&entry->d_work))
                        (void) entry->d_work.work.func(&entry->d_work.work);

                (void) cancel_delayed_work_sync(&entry->d_work);
        }
        mutex_unlock(&vmaster->fb_surf_mutex);
}
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);
        struct vmw_master *vmaster = vmw_master(vfbs->master);

        mutex_lock(&vmaster->fb_surf_mutex);
        list_del(&vfbs->head);
        mutex_unlock(&vmaster->fb_surf_mutex);

        cancel_delayed_work_sync(&vfbs->d_work);
        drm_master_put(&vfbs->master);
        drm_framebuffer_cleanup(framebuffer);
        vmw_surface_unreference(&vfbs->surface);

        kfree(vfbs);
}
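/*
 * Delayed work that emits a full-screen SVGA_3D_CMD_PRESENT for a
 * surface framebuffer. Dirty callbacks only set present_fs and schedule
 * this work, which throttles presents to VMWGFX_PRESENT_RATE.
 */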
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
        struct delayed_work *d_work =
                container_of(work, struct delayed_work, work);
        struct vmw_framebuffer_surface *vfbs =
                container_of(d_work, struct vmw_framebuffer_surface, d_work);
        struct vmw_surface *surf = vfbs->surface;
        struct drm_framebuffer *framebuffer = &vfbs->base.base;
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
                SVGA3dCopyRect cr;
        } *cmd;

        /**
         * Strictly we should take the ttm_lock in read mode before accessing
         * the fifo, to make sure the fifo is present and up. However,
         * instead we flush all workqueues under the ttm lock in exclusive mode
         * before taking down the fifo.
         */
        mutex_lock(&vfbs->work_lock);
        if (!vfbs->present_fs)
                goto out_unlock;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                goto out_resched;

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
        cmd->body.sid = cpu_to_le32(surf->res.id);
        cmd->cr.x = cpu_to_le32(0);
        cmd->cr.y = cpu_to_le32(0);
        cmd->cr.srcx = cmd->cr.x;
        cmd->cr.srcy = cmd->cr.y;
        cmd->cr.w = cpu_to_le32(framebuffer->width);
        cmd->cr.h = cpu_to_le32(framebuffer->height);
        vfbs->present_fs = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
        /**
         * Will not re-add if already pending.
         */
        schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
        mutex_unlock(&vfbs->work_lock);
}
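/*
 * DRM dirty callback for surface-backed framebuffers. Without screen
 * object support the present is deferred to the delayed work above;
 * otherwise the clip rectangles are translated into a single
 * SVGA_3D_CMD_PRESENT with one copy rect per clip.
 */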
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
                                  struct drm_file *file_priv,
                                  unsigned flags, unsigned color,
                                  struct drm_clip_rect *clips,
                                  unsigned num_clips)
{
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);
        struct vmw_surface *surf = vfbs->surface;
        struct drm_clip_rect norect;
        SVGA3dCopyRect *cr;
        int i, inc = 1;
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
                SVGA3dCopyRect cr;
        } *cmd;

        if (unlikely(vfbs->master != file_priv->master))
                return -EINVAL;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        if (!num_clips ||
            !(dev_priv->fifo.capabilities &
              SVGA_FIFO_CAP_SCREEN_OBJECT)) {

                mutex_lock(&vfbs->work_lock);
                vfbs->present_fs = true;
                ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
                mutex_unlock(&vfbs->work_lock);
                if (ret) {
                        /**
                         * No work pending, force an immediate present.
                         */
                        vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
                }
                ttm_read_unlock(&vmaster->lock);
                return 0;
        }

        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
                norect.x1 = norect.y1 = 0;
                norect.x2 = framebuffer->width;
                norect.y2 = framebuffer->height;
        } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
                num_clips /= 2;
                inc = 2; /* skip source rects */
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                ttm_read_unlock(&vmaster->lock);
                return -ENOMEM;
        }

        memset(cmd, 0, sizeof(*cmd));

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
        cmd->body.sid = cpu_to_le32(surf->res.id);

        for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
                cr->x = cpu_to_le16(clips->x1);
                cr->y = cpu_to_le16(clips->y1);
                cr->srcx = cr->x;
                cr->srcy = cr->y;
                cr->w = cpu_to_le16(clips->x2 - clips->x1);
                cr->h = cpu_to_le16(clips->y2 - clips->y1);
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
        ttm_read_unlock(&vmaster->lock);
        return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
        .destroy = vmw_framebuffer_surface_destroy,
        .dirty = vmw_framebuffer_surface_dirty,
        .create_handle = vmw_framebuffer_create_handle,
};
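/*
 * Wrap a user-space surface in a struct vmw_framebuffer_surface after
 * checking that its size, mip count and format match the requested mode.
 */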
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                                           struct drm_file *file_priv,
                                           struct vmw_surface *surface,
                                           struct vmw_framebuffer **out,
                                           const struct drm_mode_fb_cmd
                                           *mode_cmd)

{
        struct drm_device *dev = dev_priv->dev;
        struct vmw_framebuffer_surface *vfbs;
        enum SVGA3dSurfaceFormat format;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Sanity check the surface against the requested mode.
         */
        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
                     surface->sizes[0].width < mode_cmd->width ||
                     surface->sizes[0].height < mode_cmd->height ||
                     surface->sizes[0].depth != 1)) {
                DRM_ERROR("Incompatible surface dimensions "
                          "for requested mode.\n");
                return -EINVAL;
        }

        switch (mode_cmd->depth) {
        case 32:
                format = SVGA3D_A8R8G8B8;
                break;
        case 24:
                format = SVGA3D_X8R8G8B8;
                break;
        case 16:
                format = SVGA3D_R5G6B5;
                break;
        case 15:
                format = SVGA3D_A1R5G5B5;
                break;
        case 8:
                format = SVGA3D_LUMINANCE8;
                break;
        default:
                DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
                return -EINVAL;
        }

        if (unlikely(format != surface->format)) {
                DRM_ERROR("Invalid surface format for requested mode.\n");
                return -EINVAL;
        }

        vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
        if (!vfbs) {
                ret = -ENOMEM;
                goto out_err1;
        }

        ret = drm_framebuffer_init(dev, &vfbs->base.base,
                                   &vmw_framebuffer_surface_funcs);
        if (ret)
                goto out_err2;

        if (!vmw_surface_reference(surface)) {
                DRM_ERROR("failed to reference surface %p\n", surface);
                ret = -EINVAL;
                goto out_err3;
        }

        /* XXX get the first 3 from the surface info */
        vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
        vfbs->base.base.pitch = mode_cmd->pitch;
        vfbs->base.base.depth = mode_cmd->depth;
        vfbs->base.base.width = mode_cmd->width;
        vfbs->base.base.height = mode_cmd->height;
        vfbs->base.pin = &vmw_surface_dmabuf_pin;
        vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
        vfbs->surface = surface;
        vfbs->master = drm_master_get(file_priv->master);
        mutex_init(&vfbs->work_lock);

        mutex_lock(&vmaster->fb_surf_mutex);
        INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
        list_add_tail(&vfbs->head, &vmaster->fb_surf);
        mutex_unlock(&vmaster->fb_surf_mutex);

        *out = &vfbs->base;

        return 0;

out_err3:
        drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
        kfree(vfbs);
out_err1:
        return ret;
}
/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
        container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
        struct vmw_framebuffer base;
        struct vmw_dma_buffer *buffer;
};
void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);

        drm_framebuffer_cleanup(framebuffer);
        vmw_dmabuf_unreference(&vfbd->buffer);

        kfree(vfbd);
}
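/*
 * DRM dirty callback for dmabuf-backed framebuffers: emit one
 * SVGA_CMD_UPDATE per clip rectangle so the device refreshes the
 * affected regions from guest VRAM.
 */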
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
                                 struct drm_file *file_priv,
                                 unsigned flags, unsigned color,
                                 struct drm_clip_rect *clips,
                                 unsigned num_clips)
{
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_clip_rect norect;
        int ret;
        struct {
                uint32_t header;
                SVGAFifoCmdUpdate body;
        } *cmd;
        int i, increment = 1;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
                norect.x1 = norect.y1 = 0;
                norect.x2 = framebuffer->width;
                norect.y2 = framebuffer->height;
        } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
                num_clips /= 2;
                increment = 2;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                ttm_read_unlock(&vmaster->lock);
                return -ENOMEM;
        }

        for (i = 0; i < num_clips; i++, clips += increment) {
                cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
                cmd[i].body.x = cpu_to_le32(clips->x1);
                cmd[i].body.y = cpu_to_le32(clips->y1);
                cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
                cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
        ttm_read_unlock(&vmaster->lock);

        return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
        .destroy = vmw_framebuffer_dmabuf_destroy,
        .dirty = vmw_framebuffer_dmabuf_dirty,
        .create_handle = vmw_framebuffer_create_handle,
};
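/*
 * Pin/unpin helpers. Surface framebuffers are backed by a scratch VRAM
 * buffer while scanned out by the legacy display unit; overlays are
 * paused while buffers are allocated, freed or moved.
 */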
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(&vfb->base);
        unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
        int ret;

        vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
        if (unlikely(vfbs->buffer == NULL))
                return -ENOMEM;

        vmw_overlay_pause_all(dev_priv);
        ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
                              &vmw_vram_ne_placement,
                              false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);
        if (unlikely(ret != 0))
                vfbs->buffer = NULL;

        return ret;
}
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
        struct ttm_buffer_object *bo;
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(&vfb->base);

        if (unlikely(vfbs->buffer == NULL))
                return 0;

        bo = &vfbs->buffer->base;
        ttm_bo_unref(&bo);
        vfbs->buffer = NULL;

        return 0;
}
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(&vfb->base);
        int ret;

        vmw_overlay_pause_all(dev_priv);

        ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

        vmw_overlay_resume_all(dev_priv);

        WARN_ON(ret != 0);

        return 0;
}
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(&vfb->base);

        if (!vfbd->buffer) {
                WARN_ON(!vfbd->buffer);
                return 0;
        }

        return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
                                          struct vmw_dma_buffer *dmabuf,
                                          struct vmw_framebuffer **out,
                                          const struct drm_mode_fb_cmd
                                          *mode_cmd)

{
        struct drm_device *dev = dev_priv->dev;
        struct vmw_framebuffer_dmabuf *vfbd;
        unsigned int requested_size;
        int ret;

        requested_size = mode_cmd->height * mode_cmd->pitch;
        if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
        }

        vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
        if (!vfbd) {
                ret = -ENOMEM;
                goto out_err1;
        }

        ret = drm_framebuffer_init(dev, &vfbd->base.base,
                                   &vmw_framebuffer_dmabuf_funcs);
        if (ret)
                goto out_err2;

        if (!vmw_dmabuf_reference(dmabuf)) {
                DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
                ret = -EINVAL;
                goto out_err3;
        }

        vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
        vfbd->base.base.pitch = mode_cmd->pitch;
        vfbd->base.base.depth = mode_cmd->depth;
        vfbd->base.base.width = mode_cmd->width;
        vfbd->base.base.height = mode_cmd->height;
        vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
        vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
        vfbd->buffer = dmabuf;
        *out = &vfbd->base;

        return 0;

out_err3:
        drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
        kfree(vfbd);
out_err1:
        return ret;
}
/*
 * Generic Kernel modesetting functions
 */
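/*
 * DRM fb_create hook: the handle is tried first as a surface and, failing
 * that, as a dmabuf, and the matching vmw_framebuffer type is created.
 */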
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                                                 struct drm_file *file_priv,
                                                 struct drm_mode_fb_cmd *mode_cmd)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_framebuffer *vfb = NULL;
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        u64 required_size;
        int ret;

        /*
         * This code should be conditioned on Screen Objects not being used.
         * If screen objects are used, we can allocate a GMR to hold the
         * requested framebuffer.
         */
        required_size = (u64) mode_cmd->pitch * mode_cmd->height;
        if (unlikely(required_size > (u64) dev_priv->vram_size)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }

        /*
         * End conditioned code.
         */

        ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                             mode_cmd->handle, &surface);
        if (ret)
                goto try_dmabuf;

        if (!surface->scanout)
                goto err_not_scanout;

        ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
                                              &vfb, mode_cmd);

        /* vmw_user_surface_lookup takes one ref so does new_fb */
        vmw_surface_unreference(&surface);

        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
                return ERR_PTR(ret);
        }
        return &vfb->base;

try_dmabuf:
        DRM_INFO("%s: trying buffer\n", __func__);

        ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
        if (ret) {
                DRM_ERROR("failed to find buffer: %i\n", ret);
                return ERR_PTR(-ENOENT);
        }

        ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
                                             mode_cmd);

        /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
        vmw_dmabuf_unreference(&bo);

        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
                return ERR_PTR(ret);
        }

        return &vfb->base;

err_not_scanout:
        DRM_ERROR("surface not marked as scanout\n");
        /* vmw_user_surface_lookup takes one ref */
        vmw_surface_unreference(&surface);

        return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
        .fb_create = vmw_kms_fb_create,
};
int vmw_kms_init(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int ret;

        drm_mode_config_init(dev);
        dev->mode_config.funcs = &vmw_kms_funcs;
        dev->mode_config.min_width = 1;
        dev->mode_config.min_height = 1;
        /* assumed largest fb size */
        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;

        ret = vmw_kms_init_legacy_display_system(dev_priv);

        return 0;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
        /*
         * Docs say we should take the lock before calling this function,
         * but since it destroys encoders and our destructor calls
         * drm_encoder_cleanup, which takes the lock, we would deadlock.
         */
        drm_mode_config_cleanup(dev_priv->dev);
        vmw_kms_close_legacy_display_system(dev_priv);
        return 0;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_vmw_cursor_bypass_arg *arg = data;
        struct vmw_display_unit *du;
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        int ret = 0;

        mutex_lock(&dev->mode_config.mutex);
        if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        du = vmw_crtc_to_du(crtc);
                        du->hotspot_x = arg->xhot;
                        du->hotspot_y = arg->yhot;
                }

                mutex_unlock(&dev->mode_config.mutex);
                return 0;
        }

        obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                ret = -EINVAL;
                goto out;
        }

        crtc = obj_to_crtc(obj);
        du = vmw_crtc_to_du(crtc);

        du->hotspot_x = arg->xhot;
        du->hotspot_y = arg->yhot;

out:
        mutex_unlock(&dev->mode_config.mutex);

        return ret;
}
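/*
 * Program the host for a legacy (VGA-style) mode: width, height and bpp
 * go into the SVGA registers, and the pitch into the pitchlock register
 * or FIFO field, whichever the device supports.
 */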
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth)
{
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
        vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
        vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

        if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
                DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
                          depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
                return -EINVAL;
        }

        return 0;
}
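/*
 * Save the current VGA state (dimensions, bpp, pitchlock and the guest
 * display topology) so it can be restored when KMS releases the device.
 */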
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
        struct vmw_vga_topology_state *save;
        uint32_t i;

        vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
        vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
        vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_priv->vga_pitchlock =
                        vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
                                                   SVGA_FIFO_PITCHLOCK);

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
                return 0;

        vmw_priv->num_displays = vmw_read(vmw_priv,
                                          SVGA_REG_NUM_GUEST_DISPLAYS);

        if (vmw_priv->num_displays == 0)
                vmw_priv->num_displays = 1;

        for (i = 0; i < vmw_priv->num_displays; ++i) {
                save = &vmw_priv->vga_save[i];
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
                save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
                save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
                save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
                save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
                save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
                if (i == 0 && vmw_priv->num_displays == 1 &&
                    save->width == 0 && save->height == 0) {

                        /*
                         * It should be fairly safe to assume that these
                         * values are uninitialized.
                         */
                        save->width = vmw_priv->vga_width - save->pos_x;
                        save->height = vmw_priv->vga_height - save->pos_y;
                }
        }

        return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
        struct vmw_vga_topology_state *save;
        uint32_t i;

        vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
        vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
                          vmw_priv->vga_pitchlock);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                iowrite32(vmw_priv->vga_pitchlock,
                          vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
                return 0;

        for (i = 0; i < vmw_priv->num_displays; ++i) {
                save = &vmw_priv->vga_save[i];
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }

        return 0;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height)
{
        return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}

u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
        /* The device exposes no real vblank counter. */
        return 0;
}