1 /* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2  *
3  * Copyright (C) The Weather Channel, Inc.  2002.
4  * Copyright (C) 2004 Nicolai Haehnle.
5  * All Rights Reserved.
6  *
7  * The Weather Channel (TM) funded Tungsten Graphics to develop the
8  * initial release of the Radeon 8500 driver under the XFree86 license.
9  * This notice must be preserved.
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the "Software"),
13  * to deal in the Software without restriction, including without limitation
14  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15  * and/or sell copies of the Software, and to permit persons to whom the
16  * Software is furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the next
19  * paragraph) shall be included in all copies or substantial portions of the
20  * Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
25  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28  * DEALINGS IN THE SOFTWARE.
29  *
30  * Authors:
31  *    Nicolai Haehnle <prefect_@gmx.net>
32  */
33
34 #include "drmP.h"
35 #include "drm.h"
36 #include "radeon_drm.h"
37 #include "radeon_drv.h"
38 #include "r300_reg.h"
39
40 #include <asm/unaligned.h>
41
42 #define R300_SIMULTANEOUS_CLIPRECTS             4
43
44 /* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
45  */
46 static const int r300_cliprect_cntl[4] = {
47         0xAAAA,
48         0xEEEE,
49         0xFEFE,
50         0xFFFE
51 };
52
53 /**
54  * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
55  * buffer, starting with index n.
56  */
57 static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
58                                drm_radeon_kcmd_buffer_t *cmdbuf, int n)
59 {
60         struct drm_clip_rect box;
61         int nr;
62         int i;
63         RING_LOCALS;
64
65         nr = cmdbuf->nbox - n;
66         if (nr > R300_SIMULTANEOUS_CLIPRECTS)
67                 nr = R300_SIMULTANEOUS_CLIPRECTS;
68
69         DRM_DEBUG("%i cliprects\n", nr);
70
71         if (nr) {
72                 BEGIN_RING(6 + nr * 2);
73                 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
74
75                 for (i = 0; i < nr; ++i) {
76                         if (DRM_COPY_FROM_USER_UNCHECKED
77                             (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
78                                 DRM_ERROR("copy cliprect faulted\n");
79                                 return -EFAULT;
80                         }
81
82                         box.x2--; /* Hardware expects inclusive bottom-right corner */
83                         box.y2--;
84
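                        /* On RV515 and newer the coordinates are only masked;
                         * older chips additionally get R300_CLIPRECT_OFFSET
                         * added before masking.
                         */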
85                         if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
86                                 box.x1 = (box.x1) &
87                                         R300_CLIPRECT_MASK;
88                                 box.y1 = (box.y1) &
89                                         R300_CLIPRECT_MASK;
90                                 box.x2 = (box.x2) &
91                                         R300_CLIPRECT_MASK;
92                                 box.y2 = (box.y2) &
93                                         R300_CLIPRECT_MASK;
94                         } else {
95                                 box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
96                                         R300_CLIPRECT_MASK;
97                                 box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
98                                         R300_CLIPRECT_MASK;
99                                 box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
100                                         R300_CLIPRECT_MASK;
101                                 box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
102                                         R300_CLIPRECT_MASK;
103                         }
104
105                         OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
106                                  (box.y1 << R300_CLIPRECT_Y_SHIFT));
107                         OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
108                                  (box.y2 << R300_CLIPRECT_Y_SHIFT));
109
110                 }
111
112                 OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
113
114                 /* TODO/SECURITY: Force scissors to a safe value, otherwise the
115                  * client might be able to trample over memory.
116                  * The impact should be very limited, but I'd rather be safe than
117                  * sorry.
118                  */
119                 OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
120                 OUT_RING(0);
121                 OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
122                 ADVANCE_RING();
123         } else {
124                 /* Why we allow zero cliprect rendering:
125                  * There are some commands in a command buffer that must be submitted
126                  * even when there are no cliprects, e.g. DMA buffer discard
127                  * or state setting (though state setting could be avoided by
128                  * simulating a loss of context).
129                  *
130                  * Since the cmdbuf interface is so chaotic right now (and is
131                  * bound to remain that way for a bit until things settle down),
132                  * it is basically impossible to filter out the commands that are
133                  * necessary and those that aren't.
134                  *
135                  * So I choose the safe way and don't do any filtering at all;
136                  * instead, I simply set up the engine so that all rendering
137                  * can't produce any fragments.
138                  */
139                 BEGIN_RING(2);
140                 OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
141                 ADVANCE_RING();
142         }
143
144         /* flush cache and wait for idle-clean after the cliprect change */
145         BEGIN_RING(2);
146         OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
147         OUT_RING(R300_RB3D_DC_FLUSH);
148         ADVANCE_RING();
149         BEGIN_RING(2);
150         OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
151         OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
152         ADVANCE_RING();
153         /* set flush flag */
154         dev_priv->track_flush |= RADEON_FLUSH_EMITED;
155
156         return 0;
157 }
158
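/* One flag byte per 32-bit register across the 64 KiB register space.
 * MARK_SAFE registers may be written directly by userspace; MARK_CHECK_OFFSET
 * registers take a GPU offset that must pass radeon_check_offset() first;
 * anything else is rejected.
 */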
159 static u8 r300_reg_flags[0x10000 >> 2];
160
161 void r300_init_reg_flags(struct drm_device *dev)
162 {
163         int i;
164         drm_radeon_private_t *dev_priv = dev->dev_private;
165
166         memset(r300_reg_flags, 0, 0x10000 >> 2);
167 #define ADD_RANGE_MARK(reg, count, mark) \
168                 for (i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++) \
169                         r300_reg_flags[i] |= (mark);
170
171 #define MARK_SAFE               1
172 #define MARK_CHECK_OFFSET       2
173
174 #define ADD_RANGE(reg, count)   ADD_RANGE_MARK(reg, count, MARK_SAFE)
175
176         /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
177         ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
178         ADD_RANGE(R300_VAP_CNTL, 1);
179         ADD_RANGE(R300_SE_VTE_CNTL, 2);
180         ADD_RANGE(0x2134, 2);
181         ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
182         ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
183         ADD_RANGE(0x21DC, 1);
184         ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
185         ADD_RANGE(R300_VAP_CLIP_X_0, 4);
186         ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
187         ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
188         ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
189         ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
190         ADD_RANGE(R300_GB_ENABLE, 1);
191         ADD_RANGE(R300_GB_MSPOS0, 5);
192         ADD_RANGE(R300_TX_INVALTAGS, 1);
193         ADD_RANGE(R300_TX_ENABLE, 1);
194         ADD_RANGE(0x4200, 4);
195         ADD_RANGE(0x4214, 1);
196         ADD_RANGE(R300_RE_POINTSIZE, 1);
197         ADD_RANGE(0x4230, 3);
198         ADD_RANGE(R300_RE_LINE_CNT, 1);
199         ADD_RANGE(R300_RE_UNK4238, 1);
200         ADD_RANGE(0x4260, 3);
201         ADD_RANGE(R300_RE_SHADE, 4);
202         ADD_RANGE(R300_RE_POLYGON_MODE, 5);
203         ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
204         ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
205         ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
206         ADD_RANGE(R300_RE_CULL_CNTL, 1);
207         ADD_RANGE(0x42C0, 2);
208         ADD_RANGE(R300_RS_CNTL_0, 2);
209
210         ADD_RANGE(R300_SC_HYPERZ, 2);
211         ADD_RANGE(0x43E8, 1);
212
213         ADD_RANGE(0x46A4, 5);
214
215         ADD_RANGE(R300_RE_FOG_STATE, 1);
216         ADD_RANGE(R300_FOG_COLOR_R, 3);
217         ADD_RANGE(R300_PP_ALPHA_TEST, 2);
218         ADD_RANGE(0x4BD8, 1);
219         ADD_RANGE(R300_PFS_PARAM_0_X, 64);
220         ADD_RANGE(0x4E00, 1);
221         ADD_RANGE(R300_RB3D_CBLEND, 2);
222         ADD_RANGE(R300_RB3D_COLORMASK, 1);
223         ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
224         ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);   /* check offset */
225         ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
226         ADD_RANGE(0x4E50, 9);
227         ADD_RANGE(0x4E88, 1);
228         ADD_RANGE(0x4EA0, 2);
229         ADD_RANGE(R300_ZB_CNTL, 3);
230         ADD_RANGE(R300_ZB_FORMAT, 4);
231         ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);      /* check offset */
232         ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
233         ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
234         ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
235
236         ADD_RANGE(R300_TX_FILTER_0, 16);
237         ADD_RANGE(R300_TX_FILTER1_0, 16);
238         ADD_RANGE(R300_TX_SIZE_0, 16);
239         ADD_RANGE(R300_TX_FORMAT_0, 16);
240         ADD_RANGE(R300_TX_PITCH_0, 16);
241         /* Texture offset is dangerous and needs more checking */
242         ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
243         ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
244         ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
245
246         /* Registers written sporadically as primitives are emitted */
247         ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
248         ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
249         ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
250         ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
251
252         if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
253                 ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
254                 ADD_RANGE(R500_US_CONFIG, 2);
255                 ADD_RANGE(R500_US_CODE_ADDR, 3);
256                 ADD_RANGE(R500_US_FC_CTRL, 1);
257                 ADD_RANGE(R500_RS_IP_0, 16);
258                 ADD_RANGE(R500_RS_INST_0, 16);
259                 ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
260                 ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
261                 ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
262         } else {
263                 ADD_RANGE(R300_PFS_CNTL_0, 3);
264                 ADD_RANGE(R300_PFS_NODE_0, 4);
265                 ADD_RANGE(R300_PFS_TEXI_0, 64);
266                 ADD_RANGE(R300_PFS_INSTR0_0, 64);
267                 ADD_RANGE(R300_PFS_INSTR1_0, 64);
268                 ADD_RANGE(R300_PFS_INSTR2_0, 64);
269                 ADD_RANGE(R300_PFS_INSTR3_0, 64);
270                 ADD_RANGE(R300_RS_INTERP_0, 8);
271                 ADD_RANGE(R300_RS_ROUTE_0, 8);
272
273         }
274 }
275
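/* Returns 0 if every register in [reg, reg + count*4) is MARK_SAFE, 1 if any
 * of them needs further checking (or is unknown), and -1 if the start register
 * lies outside the 64 KiB register space.
 */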
276 static __inline__ int r300_check_range(unsigned reg, int count)
277 {
278         int i;
279         if (reg & ~0xffff)
280                 return -1;
281         for (i = (reg >> 2); i < (reg >> 2) + count; i++)
282                 if (r300_reg_flags[i] != MARK_SAFE)
283                         return 1;
284         return 0;
285 }
286
287 static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
288                                                           dev_priv,
289                                                           drm_radeon_kcmd_buffer_t
290                                                           * cmdbuf,
291                                                           drm_r300_cmd_header_t
292                                                           header)
293 {
294         int reg;
295         int sz;
296         int i;
297         int values[64];
298         RING_LOCALS;
299
300         sz = header.packet0.count;
301         reg = (header.packet0.reghi << 8) | header.packet0.reglo;
302
303         if ((sz > 64) || (sz < 0)) {
304                 DRM_ERROR
305                     ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
306                      reg, sz);
307                 return -EINVAL;
308         }
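        /* Validate each value against its register's flag: MARK_SAFE values
         * pass through unchanged, MARK_CHECK_OFFSET values must pass
         * radeon_check_offset(), and anything else is rejected.
         */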
309         for (i = 0; i < sz; i++) {
310                 values[i] = ((int *)cmdbuf->buf)[i];
311                 switch (r300_reg_flags[(reg >> 2) + i]) {
312                 case MARK_SAFE:
313                         break;
314                 case MARK_CHECK_OFFSET:
315                         if (!radeon_check_offset(dev_priv, (u32) values[i])) {
316                                 DRM_ERROR
317                                     ("Offset failed range check (reg=%04x sz=%d)\n",
318                                      reg, sz);
319                                 return -EINVAL;
320                         }
321                         break;
322                 default:
323                         DRM_ERROR("Register %04x failed check as flag=%02x\n",
324                                   reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
325                         return -EINVAL;
326                 }
327         }
328
329         BEGIN_RING(1 + sz);
330         OUT_RING(CP_PACKET0(reg, sz - 1));
331         OUT_RING_TABLE(values, sz);
332         ADVANCE_RING();
333
334         cmdbuf->buf += sz * 4;
335         cmdbuf->bufsz -= sz * 4;
336
337         return 0;
338 }
339
340 /**
341  * Emits a packet0 setting arbitrary registers.
342  * Called by r300_do_cp_cmdbuf.
343  *
344  * Note that checks are performed on contents and addresses of the registers
345  */
346 static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
347                                         drm_radeon_kcmd_buffer_t *cmdbuf,
348                                         drm_r300_cmd_header_t header)
349 {
350         int reg;
351         int sz;
352         RING_LOCALS;
353
354         sz = header.packet0.count;
355         reg = (header.packet0.reghi << 8) | header.packet0.reglo;
356
357         if (!sz)
358                 return 0;
359
360         if (sz * 4 > cmdbuf->bufsz)
361                 return -EINVAL;
362
363         if (reg + sz * 4 >= 0x10000) {
364                 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
365                           sz);
366                 return -EINVAL;
367         }
368
369         if (r300_check_range(reg, sz)) {
370                 /* go and check everything */
371                 return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
372                                                            header);
373         }
374         /* the rest of the data is safe to emit, whatever the values the user passed */
375
376         BEGIN_RING(1 + sz);
377         OUT_RING(CP_PACKET0(reg, sz - 1));
378         OUT_RING_TABLE((int *)cmdbuf->buf, sz);
379         ADVANCE_RING();
380
381         cmdbuf->buf += sz * 4;
382         cmdbuf->bufsz -= sz * 4;
383
384         return 0;
385 }
386
387 /**
388  * Uploads user-supplied vertex program instructions or parameters onto
389  * the graphics card.
390  * Called by r300_do_cp_cmdbuf.
391  */
392 static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
393                                     drm_radeon_kcmd_buffer_t *cmdbuf,
394                                     drm_r300_cmd_header_t header)
395 {
396         int sz;
397         int addr;
398         RING_LOCALS;
399
400         sz = header.vpu.count;
401         addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
402
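        /* Each upload unit is one PVS vector: four dwords go on the ring and
         * sixteen bytes are consumed from the user buffer per unit of sz.
         */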
403         if (!sz)
404                 return 0;
405         if (sz * 16 > cmdbuf->bufsz)
406                 return -EINVAL;
407
408         /* VAP is very sensitive, so we purge the cache before we program it
409          * and we also flush its state before & after */
410         BEGIN_RING(6);
411         OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
412         OUT_RING(R300_RB3D_DC_FLUSH);
413         OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
414         OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
415         OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
416         OUT_RING(0);
417         ADVANCE_RING();
418         /* set flush flag */
419         dev_priv->track_flush |= RADEON_FLUSH_EMITED;
420
421         BEGIN_RING(3 + sz * 4);
422         OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
423         OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
424         OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
425         ADVANCE_RING();
426
427         BEGIN_RING(2);
428         OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
429         OUT_RING(0);
430         ADVANCE_RING();
431
432         cmdbuf->buf += sz * 16;
433         cmdbuf->bufsz -= sz * 16;
434
435         return 0;
436 }
437
438 /**
439  * Emit a clear packet from userspace.
440  * Called by r300_emit_packet3.
441  */
442 static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
443                                       drm_radeon_kcmd_buffer_t *cmdbuf)
444 {
445         RING_LOCALS;
446
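        /* The clear is rendered as a single point primitive; the eight dwords
         * of immediate vertex data are taken verbatim from the user buffer.
         */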
447         if (8 * 4 > cmdbuf->bufsz)
448                 return -EINVAL;
449
450         BEGIN_RING(10);
451         OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
452         OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
453                  (1 << R300_PRIM_NUM_VERTICES_SHIFT));
454         OUT_RING_TABLE((int *)cmdbuf->buf, 8);
455         ADVANCE_RING();
456
457         BEGIN_RING(4);
458         OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
459         OUT_RING(R300_RB3D_DC_FLUSH);
460         OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
461         OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
462         ADVANCE_RING();
463         /* set flush flag */
464         dev_priv->track_flush |= RADEON_FLUSH_EMITED;
465
466         cmdbuf->buf += 8 * 4;
467         cmdbuf->bufsz -= 8 * 4;
468
469         return 0;
470 }
471
472 static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
473                                                drm_radeon_kcmd_buffer_t *cmdbuf,
474                                                u32 header)
475 {
476         int count, i, k;
477 #define MAX_ARRAY_PACKET  64
478         u32 payload[MAX_ARRAY_PACKET];
479         u32 narrays;
480         RING_LOCALS;
481
482         count = (header >> 16) & 0x3fff;
483
484         if ((count + 1) > MAX_ARRAY_PACKET) {
485                 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
486                           count);
487                 return -EINVAL;
488         }
489         memset(payload, 0, MAX_ARRAY_PACKET * 4);
490         memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
491
492         /* carefully check packet contents */
493
494         narrays = payload[0];
495         k = 0;
496         i = 1;
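        /* The payload starts with the array count; arrays then come in pairs
         * that share one attribute dword, and every buffer offset must pass
         * radeon_check_offset().
         */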
497         while ((k < narrays) && (i < (count + 1))) {
498                 i++;            /* skip attribute field */
499                 if (!radeon_check_offset(dev_priv, payload[i])) {
500                         DRM_ERROR
501                             ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
502                              k, i);
503                         return -EINVAL;
504                 }
505                 k++;
506                 i++;
507                 if (k == narrays)
508                         break;
509                 /* have one more to process, they come in pairs */
510                 if (!radeon_check_offset(dev_priv, payload[i])) {
511                         DRM_ERROR
512                             ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
513                              k, i);
514                         return -EINVAL;
515                 }
516                 k++;
517                 i++;
518         }
519         /* do the counts match what we expect ? */
520         if ((k != narrays) || (i != (count + 1))) {
521                 DRM_ERROR
522                     ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
523                      k, i, narrays, count + 1);
524                 return -EINVAL;
525         }
526
527         /* all clear, output packet */
528
529         BEGIN_RING(count + 2);
530         OUT_RING(header);
531         OUT_RING_TABLE(payload, count + 1);
532         ADVANCE_RING();
533
534         cmdbuf->buf += (count + 2) * 4;
535         cmdbuf->bufsz -= (count + 2) * 4;
536
537         return 0;
538 }
539
540 static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
541                                              drm_radeon_kcmd_buffer_t *cmdbuf)
542 {
543         u32 *cmd = (u32 *) cmdbuf->buf;
544         int count, ret;
545         RING_LOCALS;
546
547         count = (cmd[0] >> 16) & 0x3fff;
548
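        /* Range-check any source/destination offsets the blit packet carries
         * inline before passing it through to the ring.
         */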
549         if (cmd[0] & 0x8000) {
550                 u32 offset;
551
552                 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
553                               | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
554                         offset = cmd[2] << 10;
555                         ret = !radeon_check_offset(dev_priv, offset);
556                         if (ret) {
557                                 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
558                                 return -EINVAL;
559                         }
560                 }
561
562                 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
563                     (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
564                         offset = cmd[3] << 10;
565                         ret = !radeon_check_offset(dev_priv, offset);
566                         if (ret) {
567                                 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
568                                 return -EINVAL;
569                         }
570
571                 }
572         }
573
574         BEGIN_RING(count + 2);
575         OUT_RING(cmd[0]);
576         OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
577         ADVANCE_RING();
578
579         cmdbuf->buf += (count + 2) * 4;
580         cmdbuf->bufsz -= (count + 2) * 4;
581
582         return 0;
583 }
584
585 static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
586                                             drm_radeon_kcmd_buffer_t *cmdbuf)
587 {
588         u32 *cmd;
589         int count;
590         int expected_count;
591         RING_LOCALS;
592
593         cmd = (u32 *) cmdbuf->buf;
594         count = (cmd[0]>>16) & 0x3fff;
595         expected_count = cmd[1] >> 16;
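        /* With 16-bit indices two of them are packed per dword, so halve the
         * expected dword count (rounding up).
         */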
596         if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
597                 expected_count = (expected_count + 1) / 2;
598
599         if (count && count != expected_count) {
600                 DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
601                         count, expected_count);
602                 return -EINVAL;
603         }
604
605         BEGIN_RING(count + 2);
606         OUT_RING(cmd[0]);
607         OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
608         ADVANCE_RING();
609
610         cmdbuf->buf += (count + 2) * 4;
611         cmdbuf->bufsz -= (count + 2) * 4;
612
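        /* A zero dword count means the indices live in a separate INDX_BUFFER
         * packet that must follow immediately; validate it before emitting.
         */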
613         if (!count) {
614                 drm_r300_cmd_header_t header;
615
616                 if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
617                         DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
618                         return -EINVAL;
619                 }
620
621                 header.u = *(unsigned int *)cmdbuf->buf;
622
623                 cmdbuf->buf += sizeof(header);
624                 cmdbuf->bufsz -= sizeof(header);
625                 cmd = (u32 *) cmdbuf->buf;
626
627                 if (header.header.cmd_type != R300_CMD_PACKET3 ||
628                     header.packet3.packet != R300_CMD_PACKET3_RAW ||
629                     cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
630                         DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
631                         return -EINVAL;
632                 }
633
634                 if ((cmd[1] & 0x8000ffff) != 0x80000810) {
635                         DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
636                         return -EINVAL;
637                 }
638                 if (!radeon_check_offset(dev_priv, cmd[2])) {
639                         DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
640                         return -EINVAL;
641                 }
642                 if (cmd[3] != expected_count) {
643                         DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
644                                 cmd[3], expected_count);
645                         return -EINVAL;
646                 }
647
648                 BEGIN_RING(4);
649                 OUT_RING(cmd[0]);
650                 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
651                 ADVANCE_RING();
652
653                 cmdbuf->buf += 4*4;
654                 cmdbuf->bufsz -= 4*4;
655         }
656
657         return 0;
658 }
659
660 static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
661                                             drm_radeon_kcmd_buffer_t *cmdbuf)
662 {
663         u32 header;
664         int count;
665         RING_LOCALS;
666
667         if (4 > cmdbuf->bufsz)
668                 return -EINVAL;
669
670         /* FIXME: This simply emits a packet without much checking.
671            We need to be smarter. */
672
673         /* obtain first word - actual packet3 header */
674         header = *(u32 *) cmdbuf->buf;
675
676         /* Is it packet 3 ? */
677         if ((header >> 30) != 0x3) {
678                 DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
679                 return -EINVAL;
680         }
681
682         count = (header >> 16) & 0x3fff;
683
684         /* Check again now that we know how much data to expect */
685         if ((count + 2) * 4 > cmdbuf->bufsz) {
686                 DRM_ERROR
687                     ("Expected packet3 of length %d but have only %d bytes left\n",
688                      (count + 2) * 4, cmdbuf->bufsz);
689                 return -EINVAL;
690         }
691
692         /* Is it a packet type we know about ? */
693         switch (header & 0xff00) {
694         case RADEON_3D_LOAD_VBPNTR:     /* load vertex array pointers */
695                 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
696
697         case RADEON_CNTL_BITBLT_MULTI:
698                 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
699
700         case RADEON_CP_INDX_BUFFER:
701                 DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
702                 return -EINVAL;
703         case RADEON_CP_3D_DRAW_IMMD_2:
704                 /* triggers drawing using in-packet vertex data */
705         case RADEON_CP_3D_DRAW_VBUF_2:
706                 /* triggers drawing of vertex buffers setup elsewhere */
707                 dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
708                                            RADEON_PURGE_EMITED);
709                 break;
710         case RADEON_CP_3D_DRAW_INDX_2:
711                 /* triggers drawing using indices to vertex buffer */
712                 /* whenever we send vertex we clear flush & purge */
713                 dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
714                                            RADEON_PURGE_EMITED);
715                 return r300_emit_draw_indx_2(dev_priv, cmdbuf);
716         case RADEON_WAIT_FOR_IDLE:
717         case RADEON_CP_NOP:
718                 /* these packets are safe */
719                 break;
720         default:
721                 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
722                 return -EINVAL;
723         }
724
725         BEGIN_RING(count + 2);
726         OUT_RING(header);
727         OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
728         ADVANCE_RING();
729
730         cmdbuf->buf += (count + 2) * 4;
731         cmdbuf->bufsz -= (count + 2) * 4;
732
733         return 0;
734 }
735
736 /**
737  * Emit a rendering packet3 from userspace.
738  * Called by r300_do_cp_cmdbuf.
739  */
740 static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
741                                         drm_radeon_kcmd_buffer_t *cmdbuf,
742                                         drm_r300_cmd_header_t header)
743 {
744         int n;
745         int ret;
746         char *orig_buf = cmdbuf->buf;
747         int orig_bufsz = cmdbuf->bufsz;
748
749         /* This is a do-while-loop so that we run the interior at least once,
750          * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
751          */
752         n = 0;
753         do {
754                 if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
755                         ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
756                         if (ret)
757                                 return ret;
758
759                         cmdbuf->buf = orig_buf;
760                         cmdbuf->bufsz = orig_bufsz;
761                 }
762
763                 switch (header.packet3.packet) {
764                 case R300_CMD_PACKET3_CLEAR:
765                         DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
766                         ret = r300_emit_clear(dev_priv, cmdbuf);
767                         if (ret) {
768                                 DRM_ERROR("r300_emit_clear failed\n");
769                                 return ret;
770                         }
771                         break;
772
773                 case R300_CMD_PACKET3_RAW:
774                         DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
775                         ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
776                         if (ret) {
777                                 DRM_ERROR("r300_emit_raw_packet3 failed\n");
778                                 return ret;
779                         }
780                         break;
781
782                 default:
783                         DRM_ERROR("bad packet3 type %i at %p\n",
784                                   header.packet3.packet,
785                                   cmdbuf->buf - sizeof(header));
786                         return -EINVAL;
787                 }
788
789                 n += R300_SIMULTANEOUS_CLIPRECTS;
790         } while (n < cmdbuf->nbox);
791
792         return 0;
793 }
794
795 /* Some of the R300 chips seem to be extremely touchy about the two registers
796  * that are configured in r300_pacify.
797  * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
798  * sends a command buffer that contains only state setting commands and a
799  * vertex program/parameter upload sequence, this will eventually lead to a
800  * lockup, unless the sequence is bracketed by calls to r300_pacify.
801  * So we should take great care to *always* call r300_pacify before
802  * *anything* 3D related, and again afterwards. This is what the
803  * call bracket in r300_do_cp_cmdbuf is for.
804  */
805
806 /**
807  * Emit the sequence to pacify R300.
808  */
809 static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
810 {
811         uint32_t cache_z, cache_3d, cache_2d;
812         RING_LOCALS;
813
814         cache_z = R300_ZC_FLUSH;
815         cache_2d = R300_RB2D_DC_FLUSH;
816         cache_3d = R300_RB3D_DC_FLUSH;
817         if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
818                 /* we can purge; primitives were drawn since the last purge */
819                 cache_z |= R300_ZC_FREE;
820                 cache_2d |= R300_RB2D_DC_FREE;
821                 cache_3d |= R300_RB3D_DC_FREE;
822         }
823
824         /* flush & purge zbuffer */
825         BEGIN_RING(2);
826         OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
827         OUT_RING(cache_z);
828         ADVANCE_RING();
829         /* flush & purge 3d */
830         BEGIN_RING(2);
831         OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
832         OUT_RING(cache_3d);
833         ADVANCE_RING();
834         /* flush & purge texture */
835         BEGIN_RING(2);
836         OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
837         OUT_RING(0);
838         ADVANCE_RING();
839         /* FIXME: is this one really needed ? */
840         BEGIN_RING(2);
841         OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
842         OUT_RING(0);
843         ADVANCE_RING();
844         BEGIN_RING(2);
845         OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
846         OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
847         ADVANCE_RING();
848         /* flush & purge 2d through E2 as RB2D will trigger lockup */
849         BEGIN_RING(4);
850         OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
851         OUT_RING(cache_2d);
852         OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
853         OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
854                  RADEON_WAIT_HOST_IDLECLEAN);
855         ADVANCE_RING();
856         /* set flush & purge flags */
857         dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
858 }
859
860 /**
861  * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
862  * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
863  * be careful about how this function is called.
864  */
865 static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
866 {
867         drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
868         struct drm_radeon_master_private *master_priv = master->driver_priv;
869
870         buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
871         buf->pending = 1;
872         buf->used = 0;
873 }
874
875 static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
876                           drm_r300_cmd_header_t header)
877 {
878         u32 wait_until;
879         RING_LOCALS;
880
881         if (!header.wait.flags)
882                 return;
883
884         wait_until = 0;
885
886         switch(header.wait.flags) {
887         case R300_WAIT_2D:
888                 wait_until = RADEON_WAIT_2D_IDLE;
889                 break;
890         case R300_WAIT_3D:
891                 wait_until = RADEON_WAIT_3D_IDLE;
892                 break;
893         case R300_NEW_WAIT_2D_3D:
894                 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
895                 break;
896         case R300_NEW_WAIT_2D_2D_CLEAN:
897                 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
898                 break;
899         case R300_NEW_WAIT_3D_3D_CLEAN:
900                 wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
901                 break;
902         case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
903                 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
904                 wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
905                 break;
906         default:
907                 return;
908         }
909
910         BEGIN_RING(2);
911         OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
912         OUT_RING(wait_until);
913         ADVANCE_RING();
914 }
915
916 static int r300_scratch(drm_radeon_private_t *dev_priv,
917                         drm_radeon_kcmd_buffer_t *cmdbuf,
918                         drm_r300_cmd_header_t header)
919 {
920         u32 *ref_age_base;
921         u32 i, buf_idx, h_pending;
922         u64 ptr_addr;
923         RING_LOCALS;
924
925         if (cmdbuf->bufsz <
926             (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
927                 return -EINVAL;
928         }
929
930         if (header.scratch.reg >= 5) {
931                 return -EINVAL;
932         }
933
934         dev_priv->scratch_ages[header.scratch.reg]++;
935
936         ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
937         ref_age_base = (u32 *)(unsigned long)ptr_addr;
938
939         cmdbuf->buf += sizeof(u64);
940         cmdbuf->bufsz -= sizeof(u64);
941
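        /* Each buffer has a two-dword entry in the user-supplied array: the
         * new age is written to the first dword and the pending count in the
         * second dword is decremented.
         */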
942         for (i = 0; i < header.scratch.n_bufs; i++) {
943                 buf_idx = *(u32 *)cmdbuf->buf;
944                 buf_idx *= 2; /* 8 bytes per buf */
945
946                 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
947                         return -EINVAL;
948                 }
949
950                 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
951                         return -EINVAL;
952                 }
953
954                 if (h_pending == 0) {
955                         return -EINVAL;
956                 }
957
958                 h_pending--;
959
960                 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
961                         return -EINVAL;
962                 }
963
964                 cmdbuf->buf += sizeof(buf_idx);
965                 cmdbuf->bufsz -= sizeof(buf_idx);
966         }
967
968         BEGIN_RING(2);
969         OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
970         OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
971         ADVANCE_RING();
972
973         return 0;
974 }
975
976 /**
977  * Uploads user-supplied R500 fragment program (US) instructions or
978  * constants onto the graphics card.
979  * Called by r300_do_cp_cmdbuf.
980  */
981 static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
982                                        drm_radeon_kcmd_buffer_t *cmdbuf,
983                                        drm_r300_cmd_header_t header)
984 {
985         int sz;
986         int addr;
987         int type;
988         int clamp;
989         int stride;
990         RING_LOCALS;
991
992         sz = header.r500fp.count;
993         /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
994         addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
995
996         type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
997         clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
998
999         addr |= (type << 16);
1000         addr |= (clamp << 17);
1001
1002         stride = type ? 4 : 6;
1003
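        /* Constant-type uploads are four dwords per vector, other uploads
         * six, so the ring and buffer stride follow the type flag.
         */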
1004         DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
1005         if (!sz)
1006                 return 0;
1007         if (sz * stride * 4 > cmdbuf->bufsz)
1008                 return -EINVAL;
1009
1010         BEGIN_RING(3 + sz * stride);
1011         OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
1012         OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
1013         OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
1014
1015         ADVANCE_RING();
1016
1017         cmdbuf->buf += sz * stride * 4;
1018         cmdbuf->bufsz -= sz * stride * 4;
1019
1020         return 0;
1021 }
1022
1023
1024 /**
1025  * Parses and validates a user-supplied command buffer and emits appropriate
1026  * commands on the DMA ring buffer.
1027  * Called by the ioctl handler function radeon_cp_cmdbuf.
1028  */
1029 int r300_do_cp_cmdbuf(struct drm_device *dev,
1030                       struct drm_file *file_priv,
1031                       drm_radeon_kcmd_buffer_t *cmdbuf)
1032 {
1033         drm_radeon_private_t *dev_priv = dev->dev_private;
1034         struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
1035         struct drm_device_dma *dma = dev->dma;
1036         struct drm_buf *buf = NULL;
1037         int emit_dispatch_age = 0;
1038         int ret = 0;
1039
1040         DRM_DEBUG("\n");
1041
1042         /* pacify */
1043         r300_pacify(dev_priv);
1044
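        /* With few enough cliprects they are emitted once up front; larger
         * sets are re-emitted in chunks by r300_emit_packet3().
         */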
1045         if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
1046                 ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
1047                 if (ret)
1048                         goto cleanup;
1049         }
1050
1051         while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
1052                 int idx;
1053                 drm_r300_cmd_header_t header;
1054
1055                 header.u = *(unsigned int *)cmdbuf->buf;
1056
1057                 cmdbuf->buf += sizeof(header);
1058                 cmdbuf->bufsz -= sizeof(header);
1059
1060                 switch (header.header.cmd_type) {
1061                 case R300_CMD_PACKET0:
1062                         DRM_DEBUG("R300_CMD_PACKET0\n");
1063                         ret = r300_emit_packet0(dev_priv, cmdbuf, header);
1064                         if (ret) {
1065                                 DRM_ERROR("r300_emit_packet0 failed\n");
1066                                 goto cleanup;
1067                         }
1068                         break;
1069
1070                 case R300_CMD_VPU:
1071                         DRM_DEBUG("R300_CMD_VPU\n");
1072                         ret = r300_emit_vpu(dev_priv, cmdbuf, header);
1073                         if (ret) {
1074                                 DRM_ERROR("r300_emit_vpu failed\n");
1075                                 goto cleanup;
1076                         }
1077                         break;
1078
1079                 case R300_CMD_PACKET3:
1080                         DRM_DEBUG("R300_CMD_PACKET3\n");
1081                         ret = r300_emit_packet3(dev_priv, cmdbuf, header);
1082                         if (ret) {
1083                                 DRM_ERROR("r300_emit_packet3 failed\n");
1084                                 goto cleanup;
1085                         }
1086                         break;
1087
1088                 case R300_CMD_END3D:
1089                         DRM_DEBUG("R300_CMD_END3D\n");
1090                         /* TODO:
1091                            Ideally the userspace driver should not need to issue this
1092                            call, i.e. the drm driver should issue it automatically and
1093                            prevent lockups.
1094
1095                            In practice we do not understand why this call is needed or
1096                            what it does (beyond some vague guesses that it is related to
1097                            cache coherence), so the userspace driver issues it.
1098
1099                            Once we are sure which uses prevent lockups, the code could
1100                            be moved into the kernel and the userspace driver would no
1101                            longer need to use this command.
1102
1103                            Note that issuing this command does not hurt anything,
1104                            except possibly performance. */
1105                         r300_pacify(dev_priv);
1106                         break;
1107
1108                 case R300_CMD_CP_DELAY:
1109                         /* simple enough, we can do it here */
1110                         DRM_DEBUG("R300_CMD_CP_DELAY\n");
1111                         {
1112                                 int i;
1113                                 RING_LOCALS;
1114
1115                                 BEGIN_RING(header.delay.count);
1116                                 for (i = 0; i < header.delay.count; i++)
1117                                         OUT_RING(RADEON_CP_PACKET2);
1118                                 ADVANCE_RING();
1119                         }
1120                         break;
1121
1122                 case R300_CMD_DMA_DISCARD:
1123                         DRM_DEBUG("R300_CMD_DMA_DISCARD\n");
1124                         idx = header.dma.buf_idx;
1125                         if (idx < 0 || idx >= dma->buf_count) {
1126                                 DRM_ERROR("buffer index %d (of %d max)\n",
1127                                           idx, dma->buf_count - 1);
1128                                 ret = -EINVAL;
1129                                 goto cleanup;
1130                         }
1131
1132                         buf = dma->buflist[idx];
1133                         if (buf->file_priv != file_priv || buf->pending) {
1134                                 DRM_ERROR("bad buffer %p %p %d\n",
1135                                           buf->file_priv, file_priv,
1136                                           buf->pending);
1137                                 ret = -EINVAL;
1138                                 goto cleanup;
1139                         }
1140
1141                         emit_dispatch_age = 1;
1142                         r300_discard_buffer(dev, file_priv->master, buf);
1143                         break;
1144
1145                 case R300_CMD_WAIT:
1146                         DRM_DEBUG("R300_CMD_WAIT\n");
1147                         r300_cmd_wait(dev_priv, header);
1148                         break;
1149
1150                 case R300_CMD_SCRATCH:
1151                         DRM_DEBUG("R300_CMD_SCRATCH\n");
1152                         ret = r300_scratch(dev_priv, cmdbuf, header);
1153                         if (ret) {
1154                                 DRM_ERROR("r300_scratch failed\n");
1155                                 goto cleanup;
1156                         }
1157                         break;
1158
1159                 case R300_CMD_R500FP:
1160                         if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
1161                                 DRM_ERROR("Calling r500 command on r300 card\n");
1162                                 ret = -EINVAL;
1163                                 goto cleanup;
1164                         }
1165                         DRM_DEBUG("R300_CMD_R500FP\n");
1166                         ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
1167                         if (ret) {
1168                                 DRM_ERROR("r300_emit_r500fp failed\n");
1169                                 goto cleanup;
1170                         }
1171                         break;
1172                 default:
1173                         DRM_ERROR("bad cmd_type %i at %p\n",
1174                                   header.header.cmd_type,
1175                                   cmdbuf->buf - sizeof(header));
1176                         ret = -EINVAL;
1177                         goto cleanup;
1178                 }
1179         }
1180
1181         DRM_DEBUG("END\n");
1182
1183       cleanup:
1184         r300_pacify(dev_priv);
1185
1186         /* We emit the vertex buffer age here, outside the pacifier "brackets"
1187          * for two reasons:
1188          *  (1) This may coalesce multiple age emissions into a single one and
1189          *  (2) more importantly, some chips lock up hard when scratch registers
1190          *      are written inside the pacifier bracket.
1191          */
1192         if (emit_dispatch_age) {
1193                 RING_LOCALS;
1194
1195                 /* Emit the vertex buffer age */
1196                 BEGIN_RING(2);
1197                 RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
1198                 ADVANCE_RING();
1199         }
1200
1201         COMMIT_RING();
1202
1203         return ret;
1204 }