1 /**************************************************************************
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 #include "vmwgfx_drv.h"
32 #include "ttm/ttm_placement.h"
34 #include "svga_overlay.h"
35 #include "svga_escape.h"
37 #define VMW_MAX_NUM_STREAMS 1
40 struct vmw_dma_buffer *buf;
43 struct drm_vmw_control_stream_arg saved;
51 * Each stream is a single overlay. In Xv these are called ports.
54 struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
57 static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
59 struct vmw_private *dev_priv = vmw_priv(dev);
60 return dev_priv ? dev_priv->overlay_priv : NULL;
63 struct vmw_escape_header {
65 SVGAFifoCmdEscape body;
68 struct vmw_escape_video_flush {
69 struct vmw_escape_header escape;
70 SVGAEscapeVideoFlush flush;
73 static inline void fill_escape(struct vmw_escape_header *header,
76 header->cmd = SVGA_CMD_ESCAPE;
77 header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
78 header->body.size = size;
81 static inline void fill_flush(struct vmw_escape_video_flush *cmd,
84 fill_escape(&cmd->escape, sizeof(cmd->flush));
85 cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
86 cmd->flush.streamId = stream_id;
90 * Pin or unpin a buffer in vram.
92 * @dev_priv: Driver private.
93 * @buf: DMA buffer to pin or unpin.
94 * @pin: Pin buffer in vram if true.
95 * @interruptible: Use interruptible wait.
97 * Takes the current masters ttm lock in read.
100 * -ERESTARTSYS if interrupted by a signal.
102 static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
103 struct vmw_dma_buffer *buf,
104 bool pin, bool interruptible)
106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_bo_global *glob = bo->glob;
108 struct ttm_placement *overlay_placement = &vmw_vram_placement;
111 ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
112 if (unlikely(ret != 0))
115 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
116 if (unlikely(ret != 0))
119 if (buf->gmr_bound) {
120 vmw_gmr_unbind(dev_priv, buf->gmr_id);
121 spin_lock(&glob->lru_lock);
122 ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
123 spin_unlock(&glob->lru_lock);
124 buf->gmr_bound = NULL;
128 overlay_placement = &vmw_vram_ne_placement;
130 ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
132 ttm_bo_unreserve(bo);
135 ttm_read_unlock(&dev_priv->active_master->lock);
141 * Send put command to hw.
144 * -ERESTARTSYS if interrupted by a signal.
146 static int vmw_overlay_send_put(struct vmw_private *dev_priv,
147 struct vmw_dma_buffer *buf,
148 struct drm_vmw_control_stream_arg *arg,
152 struct vmw_escape_header escape;
161 } items[SVGA_VIDEO_PITCH_3 + 1];
163 struct vmw_escape_video_flush flush;
169 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
173 ret = vmw_fallback_wait(dev_priv, false, true, 0,
174 interruptible, 3*HZ);
175 if (interruptible && ret == -ERESTARTSYS)
181 fill_escape(&cmds->escape, sizeof(cmds->body));
182 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
183 cmds->body.header.streamId = arg->stream_id;
185 for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
186 cmds->body.items[i].registerId = i;
188 offset = buf->base.offset + arg->offset;
190 cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
191 cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
192 cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
193 cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
194 cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
195 cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
196 cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
197 cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
198 cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
199 cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
200 cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
201 cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
202 cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
203 cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
204 cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
205 cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
206 cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
207 cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
208 cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
210 fill_flush(&cmds->flush, arg->stream_id);
212 vmw_fifo_commit(dev_priv, sizeof(*cmds));
218 * Send stop command to hw.
221 * -ERESTARTSYS if interrupted by a signal.
223 static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
228 struct vmw_escape_header escape;
229 SVGAEscapeVideoSetRegs body;
230 struct vmw_escape_video_flush flush;
235 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
239 ret = vmw_fallback_wait(dev_priv, false, true, 0,
240 interruptible, 3*HZ);
241 if (interruptible && ret == -ERESTARTSYS)
247 fill_escape(&cmds->escape, sizeof(cmds->body));
248 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
249 cmds->body.header.streamId = stream_id;
250 cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
251 cmds->body.items[0].value = false;
252 fill_flush(&cmds->flush, stream_id);
254 vmw_fifo_commit(dev_priv, sizeof(*cmds));
260 * Stop or pause a stream.
262 * If the stream is paused the no evict flag is removed from the buffer
263 * but left in vram. This allows for instance mode_set to evict it
266 * The caller must hold the overlay lock.
268 * @stream_id which stream to stop/pause.
269 * @pause true to pause, false to stop completely.
271 static int vmw_overlay_stop(struct vmw_private *dev_priv,
272 uint32_t stream_id, bool pause,
275 struct vmw_overlay *overlay = dev_priv->overlay_priv;
276 struct vmw_stream *stream = &overlay->stream[stream_id];
279 /* no buffer attached the stream is completely stopped */
283 /* If the stream is paused this is already done */
284 if (!stream->paused) {
285 ret = vmw_overlay_send_stop(dev_priv, stream_id,
290 /* We just remove the NO_EVICT flag so no -ENOMEM */
291 ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
293 if (interruptible && ret == -ERESTARTSYS)
300 vmw_dmabuf_unreference(&stream->buf);
301 stream->paused = false;
303 stream->paused = true;
310 * Update a stream and send any put or stop fifo commands needed.
312 * The caller must hold the overlay lock.
315 * -ENOMEM if buffer doesn't fit in vram.
316 * -ERESTARTSYS if interrupted.
318 static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
319 struct vmw_dma_buffer *buf,
320 struct drm_vmw_control_stream_arg *arg,
323 struct vmw_overlay *overlay = dev_priv->overlay_priv;
324 struct vmw_stream *stream = &overlay->stream[arg->stream_id];
330 DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
331 stream->buf, buf, stream->paused ? "" : "not ");
333 if (stream->buf != buf) {
334 ret = vmw_overlay_stop(dev_priv, arg->stream_id,
335 false, interruptible);
338 } else if (!stream->paused) {
339 /* If the buffers match and not paused then just send
340 * the put command, no need to do anything else.
342 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
344 stream->saved = *arg;
346 BUG_ON(!interruptible);
351 /* We don't start the old stream if we are interrupted.
352 * Might return -ENOMEM if it can't fit the buffer in vram.
354 ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
358 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
360 /* This one needs to happen no matter what. We only remove
361 * the NO_EVICT flag so this is safe from -ENOMEM.
363 BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
367 if (stream->buf != buf)
368 stream->buf = vmw_dmabuf_reference(buf);
369 stream->saved = *arg;
377 * Used by the fb code when starting.
379 * Takes the overlay lock.
381 int vmw_overlay_stop_all(struct vmw_private *dev_priv)
383 struct vmw_overlay *overlay = dev_priv->overlay_priv;
389 mutex_lock(&overlay->mutex);
391 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
392 struct vmw_stream *stream = &overlay->stream[i];
396 ret = vmw_overlay_stop(dev_priv, i, false, false);
400 mutex_unlock(&overlay->mutex);
406 * Try to resume all paused streams.
408 * Used by the kms code after moving a new scanout buffer to vram.
410 * Takes the overlay lock.
412 int vmw_overlay_resume_all(struct vmw_private *dev_priv)
414 struct vmw_overlay *overlay = dev_priv->overlay_priv;
420 mutex_lock(&overlay->mutex);
422 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
423 struct vmw_stream *stream = &overlay->stream[i];
427 ret = vmw_overlay_update_stream(dev_priv, stream->buf,
428 &stream->saved, false);
430 DRM_INFO("%s: *warning* failed to resume stream %i\n",
434 mutex_unlock(&overlay->mutex);
440 * Pauses all active streams.
442 * Used by the kms code when moving a new scanout buffer to vram.
444 * Takes the overlay lock.
446 int vmw_overlay_pause_all(struct vmw_private *dev_priv)
448 struct vmw_overlay *overlay = dev_priv->overlay_priv;
454 mutex_lock(&overlay->mutex);
456 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
457 if (overlay->stream[i].paused)
458 DRM_INFO("%s: *warning* stream %i already paused\n",
460 ret = vmw_overlay_stop(dev_priv, i, true, false);
464 mutex_unlock(&overlay->mutex);
469 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
470 struct drm_file *file_priv)
472 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
473 struct vmw_private *dev_priv = vmw_priv(dev);
474 struct vmw_overlay *overlay = dev_priv->overlay_priv;
475 struct drm_vmw_control_stream_arg *arg =
476 (struct drm_vmw_control_stream_arg *)data;
477 struct vmw_dma_buffer *buf;
478 struct vmw_resource *res;
484 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
488 mutex_lock(&overlay->mutex);
491 ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
495 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
499 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
501 vmw_dmabuf_unreference(&buf);
504 mutex_unlock(&overlay->mutex);
505 vmw_resource_unreference(&res);
510 int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
512 if (!dev_priv->overlay_priv)
515 return VMW_MAX_NUM_STREAMS;
518 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
520 struct vmw_overlay *overlay = dev_priv->overlay_priv;
526 mutex_lock(&overlay->mutex);
528 for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
529 if (!overlay->stream[i].claimed)
532 mutex_unlock(&overlay->mutex);
537 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
539 struct vmw_overlay *overlay = dev_priv->overlay_priv;
545 mutex_lock(&overlay->mutex);
547 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
549 if (overlay->stream[i].claimed)
552 overlay->stream[i].claimed = true;
554 mutex_unlock(&overlay->mutex);
558 mutex_unlock(&overlay->mutex);
562 int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
564 struct vmw_overlay *overlay = dev_priv->overlay_priv;
566 BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
571 mutex_lock(&overlay->mutex);
573 WARN_ON(!overlay->stream[stream_id].claimed);
574 vmw_overlay_stop(dev_priv, stream_id, false, false);
575 overlay->stream[stream_id].claimed = false;
577 mutex_unlock(&overlay->mutex);
581 int vmw_overlay_init(struct vmw_private *dev_priv)
583 struct vmw_overlay *overlay;
586 if (dev_priv->overlay_priv)
589 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
590 (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
591 DRM_INFO("hardware doesn't support overlays\n");
595 overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
599 memset(overlay, 0, sizeof(*overlay));
600 mutex_init(&overlay->mutex);
601 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
602 overlay->stream[i].buf = NULL;
603 overlay->stream[i].paused = false;
604 overlay->stream[i].claimed = false;
607 dev_priv->overlay_priv = overlay;
612 int vmw_overlay_close(struct vmw_private *dev_priv)
614 struct vmw_overlay *overlay = dev_priv->overlay_priv;
615 bool forgotten_buffer = false;
621 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
622 if (overlay->stream[i].buf) {
623 forgotten_buffer = true;
624 vmw_overlay_stop(dev_priv, i, false, false);
628 WARN_ON(forgotten_buffer);
630 dev_priv->overlay_priv = NULL;