2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/kernel.h>
32 #include "r600_reg_safe.h"
34 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc);
36 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
37 struct radeon_cs_reloc **cs_reloc);
38 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
39 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
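/* KMS builds default to the memory-managed reloc handler; r600_cs_legacy_init()
 * switches this to the non-MM variant for the legacy (UMS) path. */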
40 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
43 struct r600_cs_track {
44 /* configuration we mirror so that we use the same code between kms/ums */
51 u32 cb_color_base_last[8];
52 struct radeon_bo *cb_color_bo[8];
53 u64 cb_color_bo_mc[8];
54 u32 cb_color_bo_offset[8];
55 struct radeon_bo *cb_color_frag_bo[8];
56 struct radeon_bo *cb_color_tile_bo[8];
58 u32 cb_color_size_idx[8];
63 u32 vgt_strmout_buffer_en;
66 u32 db_depth_size_idx;
70 struct radeon_bo *db_bo;
74 #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
75 #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
76 #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
77 #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
78 #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
79 #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
80 #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
81 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
88 enum radeon_family min_family;
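/* each entry gives: block width, block height, block size in bytes,
 * whether the format is valid as a color target, and the minimum family. */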
91 static const struct gpu_formats color_formats_table[] = {
93 FMT_8_BIT(V_038004_COLOR_8, 1),
94 FMT_8_BIT(V_038004_COLOR_4_4, 1),
95 FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
96 FMT_8_BIT(V_038004_FMT_1, 0),
99 FMT_16_BIT(V_038004_COLOR_16, 1),
100 FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
101 FMT_16_BIT(V_038004_COLOR_8_8, 1),
102 FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
103 FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
104 FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
105 FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
106 FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
109 FMT_24_BIT(V_038004_FMT_8_8_8),
112 FMT_32_BIT(V_038004_COLOR_32, 1),
113 FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
114 FMT_32_BIT(V_038004_COLOR_16_16, 1),
115 FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
116 FMT_32_BIT(V_038004_COLOR_8_24, 1),
117 FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
118 FMT_32_BIT(V_038004_COLOR_24_8, 1),
119 FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
120 FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
121 FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
122 FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
123 FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
124 FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
125 FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
126 FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
127 FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
128 FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
129 FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
132 FMT_48_BIT(V_038004_FMT_16_16_16),
133 FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
136 FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
137 FMT_64_BIT(V_038004_COLOR_32_32, 1),
138 FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
139 FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
140 FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
142 FMT_96_BIT(V_038004_FMT_32_32_32),
143 FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
146 FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
147 FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
149 [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
150 [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
152 /* block compressed formats */
153 [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
154 [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
155 [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
156 [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
157 [V_038004_FMT_BC5] = { 4, 4, 16, 0 },
158 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
159 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
161 /* The other Evergreen formats */
162 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
165 static bool fmt_is_valid_color(u32 format)
167 if (format >= ARRAY_SIZE(color_formats_table))
170 if (color_formats_table[format].valid_color)
176 static bool fmt_is_valid_texture(u32 format, enum radeon_family family)
178 if (format >= ARRAY_SIZE(color_formats_table))
181 if (family < color_formats_table[format].min_family)
184 if (color_formats_table[format].blockwidth > 0)
190 static int fmt_get_blocksize(u32 format)
192 if (format >= ARRAY_SIZE(color_formats_table))
195 return color_formats_table[format].blocksize;
198 static int fmt_get_nblocksx(u32 format, u32 w)
202 if (format >= ARRAY_SIZE(color_formats_table))
205 bw = color_formats_table[format].blockwidth;
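/* round partial blocks up to a whole block */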
209 return (w + bw - 1) / bw;
212 static int fmt_get_nblocksy(u32 format, u32 h)
216 if (format >= ARRAY_SIZE(color_formats_table))
219 bh = color_formats_table[format].blockheight;
223 return (h + bh - 1) / bh;
226 struct array_mode_checker {
235 /* returns alignment in pixels for pitch/height/depth and bytes for base */
236 static int r600_get_array_mode_alignment(struct array_mode_checker *values,
244 u32 macro_tile_width = values->nbanks;
245 u32 macro_tile_height = values->npipes;
246 u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
247 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
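/* a macro tile spans nbanks x npipes micro tiles; a micro tile is
 * tile_width x tile_height (8x8) pixels on r600. */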
249 switch (values->array_mode) {
250 case ARRAY_LINEAR_GENERAL:
251 /* technically tile_width/_height for pitch/height */
252 *pitch_align = 1; /* tile_width */
253 *height_align = 1; /* tile_height */
257 case ARRAY_LINEAR_ALIGNED:
258 *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
259 *height_align = tile_height;
261 *base_align = values->group_size;
263 case ARRAY_1D_TILED_THIN1:
264 *pitch_align = max((u32)tile_width,
265 (u32)(values->group_size /
266 (tile_height * values->blocksize * values->nsamples)));
267 *height_align = tile_height;
269 *base_align = values->group_size;
271 case ARRAY_2D_TILED_THIN1:
272 *pitch_align = max((u32)macro_tile_width,
273 (u32)(((values->group_size / tile_height) /
274 (values->blocksize * values->nsamples)) *
275 values->nbanks)) * tile_width;
276 *height_align = macro_tile_height * tile_height;
278 *base_align = max(macro_tile_bytes,
279 (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
288 static void r600_cs_track_init(struct r600_cs_track *track)
292 /* assume DX9 mode */
293 track->sq_config = DX9_CONSTS;
294 for (i = 0; i < 8; i++) {
295 track->cb_color_base_last[i] = 0;
296 track->cb_color_size[i] = 0;
297 track->cb_color_size_idx[i] = 0;
298 track->cb_color_info[i] = 0;
299 track->cb_color_bo[i] = NULL;
300 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
301 track->cb_color_bo_mc[i] = 0xFFFFFFFF;
303 track->cb_target_mask = 0xFFFFFFFF;
304 track->cb_shader_mask = 0xFFFFFFFF;
306 track->db_bo_mc = 0xFFFFFFFF;
307 /* assume the biggest format and that htile is enabled */
308 track->db_depth_info = 7 | (1 << 25);
309 track->db_depth_view = 0xFFFFC000;
310 track->db_depth_size = 0xFFFFFFFF;
311 track->db_depth_size_idx = 0;
312 track->db_depth_control = 0xFFFFFFFF;
315 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
317 struct r600_cs_track *track = p->track;
318 u32 slice_tile_max, size, tmp;
319 u32 height, height_align, pitch, pitch_align, depth_align;
320 u64 base_offset, base_align;
321 struct array_mode_checker array_check;
322 volatile u32 *ib = p->ib->ptr;
325 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
326 dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
329 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
330 format = G_0280A0_FORMAT(track->cb_color_info[i]);
331 if (!fmt_is_valid_color(format)) {
332 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
333 __func__, __LINE__, format,
334 i, track->cb_color_info[i]);
337 /* pitch in pixels */
338 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
339 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
340 slice_tile_max *= 64;
341 height = slice_tile_max / pitch;
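/* PITCH_TILE_MAX and SLICE_TILE_MAX are encoded in 8x8-pixel tiles,
 * hence the *8 and *64 conversions above. */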
344 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
346 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
347 array_check.array_mode = array_mode;
348 array_check.group_size = track->group_size;
349 array_check.nbanks = track->nbanks;
350 array_check.npipes = track->npipes;
351 array_check.nsamples = track->nsamples;
352 array_check.blocksize = fmt_get_blocksize(format);
353 if (r600_get_array_mode_alignment(&array_check,
354 &pitch_align, &height_align, &depth_align, &base_align)) {
355 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
356 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
357 track->cb_color_info[i]);
360 switch (array_mode) {
361 case V_0280A0_ARRAY_LINEAR_GENERAL:
363 case V_0280A0_ARRAY_LINEAR_ALIGNED:
365 case V_0280A0_ARRAY_1D_TILED_THIN1:
366 /* avoid breaking userspace */
370 case V_0280A0_ARRAY_2D_TILED_THIN1:
373 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
374 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
375 track->cb_color_info[i]);
379 if (!IS_ALIGNED(pitch, pitch_align)) {
380 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
381 __func__, __LINE__, pitch, pitch_align, array_mode);
384 if (!IS_ALIGNED(height, height_align)) {
385 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
386 __func__, __LINE__, height, height_align, array_mode);
389 if (!IS_ALIGNED(base_offset, base_align)) {
390 dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
391 base_offset, base_align, array_mode);
396 tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
397 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
398 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
399 /* the initial DDX does bad things with the CB size occasionally */
400 /* it rounds up height too far for slice tile max but the BO is smaller */
401 /* r600c,g also seem to flush at bad times in some apps resulting in
402 * bogus values here. So for linear just allow anything to avoid breaking
406 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
408 track->cb_color_bo_offset[i], tmp,
409 radeon_bo_size(track->cb_color_bo[i]));
414 tmp = (height * pitch) >> 6;
415 if (tmp < slice_tile_max)
416 slice_tile_max = tmp;
417 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
418 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
419 ib[track->cb_color_size_idx[i]] = tmp;
423 static int r600_cs_track_check(struct radeon_cs_parser *p)
425 struct r600_cs_track *track = p->track;
428 volatile u32 *ib = p->ib->ptr;
430 /* on legacy kernels we don't perform the advanced checks */
433 /* we don't support stream out buffers yet */
434 if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
435 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
438 /* check that we have a cb for each enabled target; we don't check
439 * shader_mask because it seems mesa isn't always setting it :(
441 tmp = track->cb_target_mask;
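/* CB_TARGET_MASK carries a 4-bit per-channel write mask for each of the
 * eight render targets. */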
442 for (i = 0; i < 8; i++) {
443 if ((tmp >> (i * 4)) & 0xF) {
444 /* at least one component is enabled */
445 if (track->cb_color_bo[i] == NULL) {
446 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
447 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
450 /* perform rewrite of CB_COLOR[0-7]_SIZE */
451 r = r600_cs_track_validate_cb(p, i);
456 /* Check depth buffer */
457 if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
458 G_028800_Z_ENABLE(track->db_depth_control)) {
459 u32 nviews, bpe, ntiles, size, slice_tile_max;
460 u32 height, height_align, pitch, pitch_align, depth_align;
461 u64 base_offset, base_align;
462 struct array_mode_checker array_check;
465 if (track->db_bo == NULL) {
466 dev_warn(p->dev, "z/stencil with no depth buffer\n");
469 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
470 dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
473 switch (G_028010_FORMAT(track->db_depth_info)) {
474 case V_028010_DEPTH_16:
477 case V_028010_DEPTH_X8_24:
478 case V_028010_DEPTH_8_24:
479 case V_028010_DEPTH_X8_24_FLOAT:
480 case V_028010_DEPTH_8_24_FLOAT:
481 case V_028010_DEPTH_32_FLOAT:
484 case V_028010_DEPTH_X24_8_32_FLOAT:
488 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
491 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
492 if (!track->db_depth_size_idx) {
493 dev_warn(p->dev, "z/stencil buffer size not set\n");
496 tmp = radeon_bo_size(track->db_bo) - track->db_offset;
497 tmp = (tmp / bpe) >> 6;
499 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
500 track->db_depth_size, bpe, track->db_offset,
501 radeon_bo_size(track->db_bo));
504 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
506 size = radeon_bo_size(track->db_bo);
507 /* pitch in pixels */
508 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
509 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
510 slice_tile_max *= 64;
511 height = slice_tile_max / pitch;
514 base_offset = track->db_bo_mc + track->db_offset;
515 array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
516 array_check.array_mode = array_mode;
517 array_check.group_size = track->group_size;
518 array_check.nbanks = track->nbanks;
519 array_check.npipes = track->npipes;
520 array_check.nsamples = track->nsamples;
521 array_check.blocksize = bpe;
522 if (r600_get_array_mode_alignment(&array_check,
523 &pitch_align, &height_align, &depth_align, &base_align)) {
524 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
525 G_028010_ARRAY_MODE(track->db_depth_info),
526 track->db_depth_info);
529 switch (array_mode) {
530 case V_028010_ARRAY_1D_TILED_THIN1:
531 /* don't break userspace */
534 case V_028010_ARRAY_2D_TILED_THIN1:
537 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
538 G_028010_ARRAY_MODE(track->db_depth_info),
539 track->db_depth_info);
543 if (!IS_ALIGNED(pitch, pitch_align)) {
544 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
545 __func__, __LINE__, pitch, pitch_align, array_mode);
548 if (!IS_ALIGNED(height, height_align)) {
549 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
550 __func__, __LINE__, height, height_align, array_mode);
553 if (!IS_ALIGNED(base_offset, base_align)) {
554 dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
555 base_offset, base_align, array_mode);
559 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
560 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
561 tmp = ntiles * bpe * 64 * nviews;
562 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
563 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
565 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
566 radeon_bo_size(track->db_bo));
575 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
576 * @parser: parser structure holding parsing context.
577 * @pkt: where to store packet information
579 * Assume that chunk_ib_index is properly set. Will return -EINVAL
580 * if the packet is bigger than the remaining ib size or if the packet type is unknown.
582 int r600_cs_packet_parse(struct radeon_cs_parser *p,
583 struct radeon_cs_packet *pkt,
586 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
589 if (idx >= ib_chunk->length_dw) {
590 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
591 idx, ib_chunk->length_dw);
594 header = radeon_get_ib_value(p, idx);
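/* decode the CP header: packet type and dword count, plus either a
 * register offset (type 0) or an opcode (type 3). */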
596 pkt->type = CP_PACKET_GET_TYPE(header);
597 pkt->count = CP_PACKET_GET_COUNT(header);
601 pkt->reg = CP_PACKET0_GET_REG(header);
604 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
610 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
613 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
614 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
615 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
622 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
623 * @p: parser structure holding parsing context.
624 * @cs_reloc: where to return the relocation entry.
629 * Check that the next packet is a relocation PACKET3 and return the
630 * matching entry from the parser's relocation list.
632 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
633 struct radeon_cs_reloc **cs_reloc)
635 struct radeon_cs_chunk *relocs_chunk;
636 struct radeon_cs_packet p3reloc;
640 if (p->chunk_relocs_idx == -1) {
641 DRM_ERROR("No relocation chunk !\n");
645 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
646 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
650 p->idx += p3reloc.count + 2;
651 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
652 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
656 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
657 if (idx >= relocs_chunk->length_dw) {
658 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
659 idx, relocs_chunk->length_dw);
662 /* FIXME: we assume reloc size is 4 dwords */
663 *cs_reloc = p->relocs_ptr[(idx / 4)];
668 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
669 * @p: parser structure holding parsing context.
670 * @cs_reloc: where to return the relocation entry.
675 * Check that the next packet is a relocation PACKET3 and compute the
676 * GPU offset directly from the raw relocation chunk data (legacy, non-MM path).
678 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
679 struct radeon_cs_reloc **cs_reloc)
681 struct radeon_cs_chunk *relocs_chunk;
682 struct radeon_cs_packet p3reloc;
686 if (p->chunk_relocs_idx == -1) {
687 DRM_ERROR("No relocation chunk !\n");
691 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
692 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
696 p->idx += p3reloc.count + 2;
697 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
698 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
702 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
703 if (idx >= relocs_chunk->length_dw) {
704 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
705 idx, relocs_chunk->length_dw);
708 *cs_reloc = p->relocs;
709 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
710 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
715 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
716 * @parser: parser structure holding parsing context.
718 * Check whether the next packet is a relocation PACKET3; the parser
719 * position is not advanced.
721 static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
723 struct radeon_cs_packet p3reloc;
726 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
730 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
737 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
738 * @parser: parser structure holding parsing context.
740 * Userspace sends a special sequence for VLINE waits.
741 * PACKET0 - VLINE_START_END + value
742 * PACKET3 - WAIT_REG_MEM poll vline status reg
743 * RELOC (P3) - crtc_id in reloc.
745 * This function parses this and relocates the VLINE START END
746 * and WAIT_REG_MEM packets to the correct crtc.
747 * It also detects a switched off crtc and nulls out the
750 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
752 struct drm_mode_object *obj;
753 struct drm_crtc *crtc;
754 struct radeon_crtc *radeon_crtc;
755 struct radeon_cs_packet p3reloc, wait_reg_mem;
758 uint32_t header, h_idx, reg, wait_reg_mem_info;
759 volatile uint32_t *ib;
763 /* parse the WAIT_REG_MEM */
764 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
768 /* check it's a WAIT_REG_MEM */
769 if (wait_reg_mem.type != PACKET_TYPE3 ||
770 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
771 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
775 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
776 /* bit 4 is reg (0) or mem (1) */
777 if (wait_reg_mem_info & 0x10) {
778 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
781 /* waiting for value to be equal */
782 if ((wait_reg_mem_info & 0x7) != 0x3) {
783 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
786 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
787 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
791 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
792 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
796 /* jump over the NOP */
797 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
802 p->idx += wait_reg_mem.count + 2;
803 p->idx += p3reloc.count + 2;
805 header = radeon_get_ib_value(p, h_idx);
806 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
807 reg = CP_PACKET0_GET_REG(header);
809 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
811 DRM_ERROR("cannot find crtc %d\n", crtc_id);
814 crtc = obj_to_crtc(obj);
815 radeon_crtc = to_radeon_crtc(crtc);
816 crtc_id = radeon_crtc->crtc_id;
818 if (!crtc->enabled) {
819 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
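/* PACKET2(0) is a type-2 filler dword, so the whole vline wait sequence
 * becomes a no-op for the CP. */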
820 ib[h_idx + 2] = PACKET2(0);
821 ib[h_idx + 3] = PACKET2(0);
822 ib[h_idx + 4] = PACKET2(0);
823 ib[h_idx + 5] = PACKET2(0);
824 ib[h_idx + 6] = PACKET2(0);
825 ib[h_idx + 7] = PACKET2(0);
826 ib[h_idx + 8] = PACKET2(0);
827 } else if (crtc_id == 1) {
829 case AVIVO_D1MODE_VLINE_START_END:
830 header &= ~R600_CP_PACKET0_REG_MASK;
831 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
834 DRM_ERROR("unknown crtc reloc\n");
838 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
844 static int r600_packet0_check(struct radeon_cs_parser *p,
845 struct radeon_cs_packet *pkt,
846 unsigned idx, unsigned reg)
851 case AVIVO_D1MODE_VLINE_START_END:
852 r = r600_cs_packet_parse_vline(p);
854 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
860 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
867 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
868 struct radeon_cs_packet *pkt)
876 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
877 r = r600_packet0_check(p, pkt, idx, reg);
886 * r600_cs_check_reg() - check if register is authorized or not
887 * @parser: parser structure holding parsing context
888 * @reg: register we are testing
889 * @idx: index into the cs buffer
891 * This function will test against r600_reg_safe_bm and return 0
892 * if the register is safe. If the register is not flagged as safe, this
893 * function will test it against a list of registers needing special handling.
895 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
897 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
898 struct radeon_cs_reloc *reloc;
903 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
904 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
907 m = 1 << ((reg >> 2) & 31);
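/* one bit per register dword: a clear bit in r600_reg_safe_bm means the
 * register can be written as-is, a set bit needs the handling below. */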
908 if (!(r600_reg_safe_bm[i] & m))
912 /* force the following regs to 0 in an attempt to disable the output buffer;
913 * we would need to better understand how it works to perform a proper
914 * security check on it (Jerome)
916 case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
917 case R_008C44_SQ_ESGS_RING_SIZE:
918 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
919 case R_008C54_SQ_ESTMP_RING_SIZE:
920 case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
921 case R_008C74_SQ_FBUF_RING_SIZE:
922 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
923 case R_008C5C_SQ_GSTMP_RING_SIZE:
924 case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
925 case R_008C4C_SQ_GSVS_RING_SIZE:
926 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
927 case R_008C6C_SQ_PSTMP_RING_SIZE:
928 case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
929 case R_008C7C_SQ_REDUC_RING_SIZE:
930 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
931 case R_008C64_SQ_VSTMP_RING_SIZE:
932 case R_0288C8_SQ_GS_VERT_ITEMSIZE:
933 /* get value to populate the IB, don't remove */
934 tmp = radeon_get_ib_value(p, idx);
938 track->sq_config = radeon_get_ib_value(p, idx);
940 case R_028800_DB_DEPTH_CONTROL:
941 track->db_depth_control = radeon_get_ib_value(p, idx);
943 case R_028010_DB_DEPTH_INFO:
944 if (r600_cs_packet_next_is_pkt3_nop(p)) {
945 r = r600_cs_packet_next_reloc(p, &reloc);
947 dev_warn(p->dev, "bad SET_CONTEXT_REG "
951 track->db_depth_info = radeon_get_ib_value(p, idx);
952 ib[idx] &= C_028010_ARRAY_MODE;
953 track->db_depth_info &= C_028010_ARRAY_MODE;
954 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
955 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
956 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
958 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
959 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
962 track->db_depth_info = radeon_get_ib_value(p, idx);
964 case R_028004_DB_DEPTH_VIEW:
965 track->db_depth_view = radeon_get_ib_value(p, idx);
967 case R_028000_DB_DEPTH_SIZE:
968 track->db_depth_size = radeon_get_ib_value(p, idx);
969 track->db_depth_size_idx = idx;
971 case R_028AB0_VGT_STRMOUT_EN:
972 track->vgt_strmout_en = radeon_get_ib_value(p, idx);
974 case R_028B20_VGT_STRMOUT_BUFFER_EN:
975 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
977 case R_028238_CB_TARGET_MASK:
978 track->cb_target_mask = radeon_get_ib_value(p, idx);
980 case R_02823C_CB_SHADER_MASK:
981 track->cb_shader_mask = radeon_get_ib_value(p, idx);
983 case R_028C04_PA_SC_AA_CONFIG:
984 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
985 track->nsamples = 1 << tmp;
987 case R_0280A0_CB_COLOR0_INFO:
988 case R_0280A4_CB_COLOR1_INFO:
989 case R_0280A8_CB_COLOR2_INFO:
990 case R_0280AC_CB_COLOR3_INFO:
991 case R_0280B0_CB_COLOR4_INFO:
992 case R_0280B4_CB_COLOR5_INFO:
993 case R_0280B8_CB_COLOR6_INFO:
994 case R_0280BC_CB_COLOR7_INFO:
995 if (r600_cs_packet_next_is_pkt3_nop(p)) {
996 r = r600_cs_packet_next_reloc(p, &reloc);
998 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1001 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1002 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1003 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1004 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1005 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1006 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1007 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1008 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1011 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1012 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1015 case R_028060_CB_COLOR0_SIZE:
1016 case R_028064_CB_COLOR1_SIZE:
1017 case R_028068_CB_COLOR2_SIZE:
1018 case R_02806C_CB_COLOR3_SIZE:
1019 case R_028070_CB_COLOR4_SIZE:
1020 case R_028074_CB_COLOR5_SIZE:
1021 case R_028078_CB_COLOR6_SIZE:
1022 case R_02807C_CB_COLOR7_SIZE:
1023 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
1024 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
1025 track->cb_color_size_idx[tmp] = idx;
1027 /* These registers were added late; there is userspace
1028 * which does provide a relocation for them but sets a
1029 * 0 offset. In order to avoid breaking old userspace
1030 * we detect this and set the address to point to the last
1031 * CB_COLOR0_BASE. Note that if userspace doesn't set
1032 * CB_COLOR0_BASE before these registers we will report an
1033 * error. Old userspace always set CB_COLOR0_BASE
1034 * before any of this.
1036 case R_0280E0_CB_COLOR0_FRAG:
1037 case R_0280E4_CB_COLOR1_FRAG:
1038 case R_0280E8_CB_COLOR2_FRAG:
1039 case R_0280EC_CB_COLOR3_FRAG:
1040 case R_0280F0_CB_COLOR4_FRAG:
1041 case R_0280F4_CB_COLOR5_FRAG:
1042 case R_0280F8_CB_COLOR6_FRAG:
1043 case R_0280FC_CB_COLOR7_FRAG:
1044 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1045 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
1046 if (!track->cb_color_base_last[tmp]) {
1047 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1050 ib[idx] = track->cb_color_base_last[tmp];
1051 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1053 r = r600_cs_packet_next_reloc(p, &reloc);
1055 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1058 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1059 track->cb_color_frag_bo[tmp] = reloc->robj;
1062 case R_0280C0_CB_COLOR0_TILE:
1063 case R_0280C4_CB_COLOR1_TILE:
1064 case R_0280C8_CB_COLOR2_TILE:
1065 case R_0280CC_CB_COLOR3_TILE:
1066 case R_0280D0_CB_COLOR4_TILE:
1067 case R_0280D4_CB_COLOR5_TILE:
1068 case R_0280D8_CB_COLOR6_TILE:
1069 case R_0280DC_CB_COLOR7_TILE:
1070 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1071 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
1072 if (!track->cb_color_base_last[tmp]) {
1073 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1076 ib[idx] = track->cb_color_base_last[tmp];
1077 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1079 r = r600_cs_packet_next_reloc(p, &reloc);
1081 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1084 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1085 track->cb_color_tile_bo[tmp] = reloc->robj;
1088 case CB_COLOR0_BASE:
1089 case CB_COLOR1_BASE:
1090 case CB_COLOR2_BASE:
1091 case CB_COLOR3_BASE:
1092 case CB_COLOR4_BASE:
1093 case CB_COLOR5_BASE:
1094 case CB_COLOR6_BASE:
1095 case CB_COLOR7_BASE:
1096 r = r600_cs_packet_next_reloc(p, &reloc);
1098 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1102 tmp = (reg - CB_COLOR0_BASE) / 4;
1103 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1104 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1105 track->cb_color_base_last[tmp] = ib[idx];
1106 track->cb_color_bo[tmp] = reloc->robj;
1107 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
1110 r = r600_cs_packet_next_reloc(p, &reloc);
1112 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1116 track->db_offset = radeon_get_ib_value(p, idx) << 8;
1117 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1118 track->db_bo = reloc->robj;
1119 track->db_bo_mc = reloc->lobj.gpu_offset;
1121 case DB_HTILE_DATA_BASE:
1122 case SQ_PGM_START_FS:
1123 case SQ_PGM_START_ES:
1124 case SQ_PGM_START_VS:
1125 case SQ_PGM_START_GS:
1126 case SQ_PGM_START_PS:
1127 case SQ_ALU_CONST_CACHE_GS_0:
1128 case SQ_ALU_CONST_CACHE_GS_1:
1129 case SQ_ALU_CONST_CACHE_GS_2:
1130 case SQ_ALU_CONST_CACHE_GS_3:
1131 case SQ_ALU_CONST_CACHE_GS_4:
1132 case SQ_ALU_CONST_CACHE_GS_5:
1133 case SQ_ALU_CONST_CACHE_GS_6:
1134 case SQ_ALU_CONST_CACHE_GS_7:
1135 case SQ_ALU_CONST_CACHE_GS_8:
1136 case SQ_ALU_CONST_CACHE_GS_9:
1137 case SQ_ALU_CONST_CACHE_GS_10:
1138 case SQ_ALU_CONST_CACHE_GS_11:
1139 case SQ_ALU_CONST_CACHE_GS_12:
1140 case SQ_ALU_CONST_CACHE_GS_13:
1141 case SQ_ALU_CONST_CACHE_GS_14:
1142 case SQ_ALU_CONST_CACHE_GS_15:
1143 case SQ_ALU_CONST_CACHE_PS_0:
1144 case SQ_ALU_CONST_CACHE_PS_1:
1145 case SQ_ALU_CONST_CACHE_PS_2:
1146 case SQ_ALU_CONST_CACHE_PS_3:
1147 case SQ_ALU_CONST_CACHE_PS_4:
1148 case SQ_ALU_CONST_CACHE_PS_5:
1149 case SQ_ALU_CONST_CACHE_PS_6:
1150 case SQ_ALU_CONST_CACHE_PS_7:
1151 case SQ_ALU_CONST_CACHE_PS_8:
1152 case SQ_ALU_CONST_CACHE_PS_9:
1153 case SQ_ALU_CONST_CACHE_PS_10:
1154 case SQ_ALU_CONST_CACHE_PS_11:
1155 case SQ_ALU_CONST_CACHE_PS_12:
1156 case SQ_ALU_CONST_CACHE_PS_13:
1157 case SQ_ALU_CONST_CACHE_PS_14:
1158 case SQ_ALU_CONST_CACHE_PS_15:
1159 case SQ_ALU_CONST_CACHE_VS_0:
1160 case SQ_ALU_CONST_CACHE_VS_1:
1161 case SQ_ALU_CONST_CACHE_VS_2:
1162 case SQ_ALU_CONST_CACHE_VS_3:
1163 case SQ_ALU_CONST_CACHE_VS_4:
1164 case SQ_ALU_CONST_CACHE_VS_5:
1165 case SQ_ALU_CONST_CACHE_VS_6:
1166 case SQ_ALU_CONST_CACHE_VS_7:
1167 case SQ_ALU_CONST_CACHE_VS_8:
1168 case SQ_ALU_CONST_CACHE_VS_9:
1169 case SQ_ALU_CONST_CACHE_VS_10:
1170 case SQ_ALU_CONST_CACHE_VS_11:
1171 case SQ_ALU_CONST_CACHE_VS_12:
1172 case SQ_ALU_CONST_CACHE_VS_13:
1173 case SQ_ALU_CONST_CACHE_VS_14:
1174 case SQ_ALU_CONST_CACHE_VS_15:
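/* all of the above take a 256-byte aligned GPU address programmed as
 * (address >> 8), so the relocated offset is added in the same form. */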
1175 r = r600_cs_packet_next_reloc(p, &reloc);
1177 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1181 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1183 case SX_MEMORY_EXPORT_BASE:
1184 r = r600_cs_packet_next_reloc(p, &reloc);
1186 dev_warn(p->dev, "bad SET_CONFIG_REG "
1190 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1193 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1199 static unsigned mip_minify(unsigned size, unsigned level)
1203 val = max(1U, size >> level);
1205 val = roundup_pow_of_two(val);
1209 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1210 unsigned w0, unsigned h0, unsigned d0, unsigned format,
1211 unsigned block_align, unsigned height_align, unsigned base_align,
1212 unsigned *l0_size, unsigned *mipmap_size)
1214 unsigned offset, i, level;
1215 unsigned width, height, depth, size;
1218 unsigned nlevels = llevel - blevel + 1;
1221 blocksize = fmt_get_blocksize(format);
1223 w0 = mip_minify(w0, 0);
1224 h0 = mip_minify(h0, 0);
1225 d0 = mip_minify(d0, 0);
1226 for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1227 width = mip_minify(w0, i);
1228 nbx = fmt_get_nblocksx(format, width);
1230 nbx = round_up(nbx, block_align);
1232 height = mip_minify(h0, i);
1233 nby = fmt_get_nblocksy(format, height);
1234 nby = round_up(nby, height_align);
1236 depth = mip_minify(d0, i);
1238 size = nbx * nby * blocksize;
1247 if (i == 0 || i == 1)
1248 offset = round_up(offset, base_align);
1252 *mipmap_size = offset;
1254 *mipmap_size = *l0_size;
1256 *mipmap_size -= *l0_size;
1260 * r600_check_texture_resource() - check if a texture resource is valid
1261 * @p: parser structure holding parsing context
1262 * @idx: index into the cs buffer
1263 * @texture: texture's bo structure
1264 * @mipmap: mipmap's bo structure
1266 * This function will check that the resource has valid fields and that
1267 * the texture and mipmap bo objects are big enough to cover this resource.
1269 static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1270 struct radeon_bo *texture,
1271 struct radeon_bo *mipmap,
1276 struct r600_cs_track *track = p->track;
1277 u32 nfaces, llevel, blevel, w0, h0, d0;
1278 u32 word0, word1, l0_size, mipmap_size, word2, word3;
1279 u32 height_align, pitch, pitch_align, depth_align;
1280 u32 array, barray, larray;
1282 struct array_mode_checker array_check;
1285 /* on legacy kernels we don't perform the advanced checks */
1286 if (p->rdev == NULL)
1289 /* convert to bytes */
1293 word0 = radeon_get_ib_value(p, idx + 0);
1294 if (tiling_flags & RADEON_TILING_MACRO)
1295 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1296 else if (tiling_flags & RADEON_TILING_MICRO)
1297 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1298 word1 = radeon_get_ib_value(p, idx + 1);
1299 w0 = G_038000_TEX_WIDTH(word0) + 1;
1300 h0 = G_038004_TEX_HEIGHT(word1) + 1;
1301 d0 = G_038004_TEX_DEPTH(word1);
1303 switch (G_038000_DIM(word0)) {
1304 case V_038000_SQ_TEX_DIM_1D:
1305 case V_038000_SQ_TEX_DIM_2D:
1306 case V_038000_SQ_TEX_DIM_3D:
1308 case V_038000_SQ_TEX_DIM_CUBEMAP:
1309 if (p->family >= CHIP_RV770)
1314 case V_038000_SQ_TEX_DIM_1D_ARRAY:
1315 case V_038000_SQ_TEX_DIM_2D_ARRAY:
1318 case V_038000_SQ_TEX_DIM_2D_MSAA:
1319 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1321 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1324 format = G_038004_DATA_FORMAT(word1);
1325 if (!fmt_is_valid_texture(format, p->family)) {
1326 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1327 __func__, __LINE__, format);
1331 /* pitch in texels */
1332 pitch = (G_038000_PITCH(word0) + 1) * 8;
1333 array_check.array_mode = G_038000_TILE_MODE(word0);
1334 array_check.group_size = track->group_size;
1335 array_check.nbanks = track->nbanks;
1336 array_check.npipes = track->npipes;
1337 array_check.nsamples = 1;
1338 array_check.blocksize = fmt_get_blocksize(format);
1339 if (r600_get_array_mode_alignment(&array_check,
1340 &pitch_align, &height_align, &depth_align, &base_align)) {
1341 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1342 __func__, __LINE__, G_038000_TILE_MODE(word0));
1346 /* XXX check height as well... */
1348 if (!IS_ALIGNED(pitch, pitch_align)) {
1349 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1350 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1353 if (!IS_ALIGNED(base_offset, base_align)) {
1354 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1355 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1358 if (!IS_ALIGNED(mip_offset, base_align)) {
1359 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1360 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1364 word2 = radeon_get_ib_value(p, idx + 2) << 8;
1365 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1367 word0 = radeon_get_ib_value(p, idx + 4);
1368 word1 = radeon_get_ib_value(p, idx + 5);
1369 blevel = G_038010_BASE_LEVEL(word0);
1370 llevel = G_038014_LAST_LEVEL(word1);
1372 barray = G_038014_BASE_ARRAY(word1);
1373 larray = G_038014_LAST_ARRAY(word1);
1375 nfaces = larray - barray + 1;
1377 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
1378 pitch_align, height_align, base_align,
1379 &l0_size, &mipmap_size);
1380 /* using get ib will give us the offset into the texture bo */
1381 if ((l0_size + word2) > radeon_bo_size(texture)) {
1382 dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
1383 w0, h0, format, word2, l0_size, radeon_bo_size(texture));
1384 dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
1387 /* using get ib will give us the offset into the mipmap bo */
1388 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1389 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1390 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1391 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1396 static int r600_packet3_check(struct radeon_cs_parser *p,
1397 struct radeon_cs_packet *pkt)
1399 struct radeon_cs_reloc *reloc;
1400 struct r600_cs_track *track;
1404 unsigned start_reg, end_reg, reg;
1408 track = (struct r600_cs_track *)p->track;
1411 idx_value = radeon_get_ib_value(p, idx);
1413 switch (pkt->opcode) {
1414 case PACKET3_SET_PREDICATION:
1418 if (pkt->count != 1) {
1419 DRM_ERROR("bad SET PREDICATION\n");
1423 tmp = radeon_get_ib_value(p, idx + 1);
1424 pred_op = (tmp >> 16) & 0x7;
1426 /* for the clear predicate operation */
1431 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1435 r = r600_cs_packet_next_reloc(p, &reloc);
1437 DRM_ERROR("bad SET PREDICATION\n");
1441 ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1442 ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
1446 case PACKET3_START_3D_CMDBUF:
1447 if (p->family >= CHIP_RV770 || pkt->count) {
1448 DRM_ERROR("bad START_3D\n");
1452 case PACKET3_CONTEXT_CONTROL:
1453 if (pkt->count != 1) {
1454 DRM_ERROR("bad CONTEXT_CONTROL\n");
1458 case PACKET3_INDEX_TYPE:
1459 case PACKET3_NUM_INSTANCES:
1461 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1465 case PACKET3_DRAW_INDEX:
1466 if (pkt->count != 3) {
1467 DRM_ERROR("bad DRAW_INDEX\n");
1470 r = r600_cs_packet_next_reloc(p, &reloc);
1472 DRM_ERROR("bad DRAW_INDEX\n");
1475 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1476 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1477 r = r600_cs_track_check(p);
1479 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1483 case PACKET3_DRAW_INDEX_AUTO:
1484 if (pkt->count != 1) {
1485 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1488 r = r600_cs_track_check(p);
1490 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1494 case PACKET3_DRAW_INDEX_IMMD_BE:
1495 case PACKET3_DRAW_INDEX_IMMD:
1496 if (pkt->count < 2) {
1497 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1500 r = r600_cs_track_check(p);
1502 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1506 case PACKET3_WAIT_REG_MEM:
1507 if (pkt->count != 5) {
1508 DRM_ERROR("bad WAIT_REG_MEM\n");
1511 /* bit 4 is reg (0) or mem (1) */
1512 if (idx_value & 0x10) {
1513 r = r600_cs_packet_next_reloc(p, &reloc);
1515 DRM_ERROR("bad WAIT_REG_MEM\n");
1518 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1519 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1522 case PACKET3_SURFACE_SYNC:
1523 if (pkt->count != 3) {
1524 DRM_ERROR("bad SURFACE_SYNC\n");
1527 /* 0xffffffff/0x0 is flush all cache flag */
1528 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1529 radeon_get_ib_value(p, idx + 2) != 0) {
1530 r = r600_cs_packet_next_reloc(p, &reloc);
1532 DRM_ERROR("bad SURFACE_SYNC\n");
1535 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1538 case PACKET3_EVENT_WRITE:
1539 if (pkt->count != 2 && pkt->count != 0) {
1540 DRM_ERROR("bad EVENT_WRITE\n");
1544 r = r600_cs_packet_next_reloc(p, &reloc);
1546 DRM_ERROR("bad EVENT_WRITE\n");
1549 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1550 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1553 case PACKET3_EVENT_WRITE_EOP:
1554 if (pkt->count != 4) {
1555 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1558 r = r600_cs_packet_next_reloc(p, &reloc);
1560 DRM_ERROR("bad EVENT_WRITE\n");
1563 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1564 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1566 case PACKET3_SET_CONFIG_REG:
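/* the first payload dword is a dword offset from the config register
 * window base; convert to a byte address and bounds-check the whole run. */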
1567 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1568 end_reg = 4 * pkt->count + start_reg - 4;
1569 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1570 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1571 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1572 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1575 for (i = 0; i < pkt->count; i++) {
1576 reg = start_reg + (4 * i);
1577 r = r600_cs_check_reg(p, reg, idx+1+i);
1582 case PACKET3_SET_CONTEXT_REG:
1583 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1584 end_reg = 4 * pkt->count + start_reg - 4;
1585 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1586 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1587 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1588 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1591 for (i = 0; i < pkt->count; i++) {
1592 reg = start_reg + (4 * i);
1593 r = r600_cs_check_reg(p, reg, idx+1+i);
1598 case PACKET3_SET_RESOURCE:
1599 if (pkt->count % 7) {
1600 DRM_ERROR("bad SET_RESOURCE\n");
1603 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1604 end_reg = 4 * pkt->count + start_reg - 4;
1605 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1606 (start_reg >= PACKET3_SET_RESOURCE_END) ||
1607 (end_reg >= PACKET3_SET_RESOURCE_END)) {
1608 DRM_ERROR("bad SET_RESOURCE\n");
1611 for (i = 0; i < (pkt->count / 7); i++) {
1612 struct radeon_bo *texture, *mipmap;
1613 u32 size, offset, base_offset, mip_offset;
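/* each SET_RESOURCE entry is 7 dwords; dword 6 carries the constant
 * type (texture vs. vertex buffer). */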
1615 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1616 case SQ_TEX_VTX_VALID_TEXTURE:
1618 r = r600_cs_packet_next_reloc(p, &reloc);
1620 DRM_ERROR("bad SET_RESOURCE\n");
1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1624 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1625 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1626 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1627 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1628 texture = reloc->robj;
1630 r = r600_cs_packet_next_reloc(p, &reloc);
1632 DRM_ERROR("bad SET_RESOURCE\n");
1635 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1636 mipmap = reloc->robj;
1637 r = r600_check_texture_resource(p, idx+(i*7)+1,
1639 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1640 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1641 reloc->lobj.tiling_flags);
1644 ib[idx+1+(i*7)+2] += base_offset;
1645 ib[idx+1+(i*7)+3] += mip_offset;
1647 case SQ_TEX_VTX_VALID_BUFFER:
1649 r = r600_cs_packet_next_reloc(p, &reloc);
1651 DRM_ERROR("bad SET_RESOURCE\n");
1654 offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1655 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
1656 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1657 /* force size to size of the buffer */
1658 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
1659 size + offset, radeon_bo_size(reloc->robj));
1660 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
1662 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
1663 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1665 case SQ_TEX_VTX_INVALID_TEXTURE:
1666 case SQ_TEX_VTX_INVALID_BUFFER:
1668 DRM_ERROR("bad SET_RESOURCE\n");
1673 case PACKET3_SET_ALU_CONST:
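/* ALU constants only occupy register space when SQ_CONFIG selects
 * DX9-style constants; only then is there a register range to check. */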
1674 if (track->sq_config & DX9_CONSTS) {
1675 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
1676 end_reg = 4 * pkt->count + start_reg - 4;
1677 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
1678 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
1679 (end_reg >= PACKET3_SET_ALU_CONST_END)) {
1680 DRM_ERROR("bad SET_ALU_CONST\n");
1685 case PACKET3_SET_BOOL_CONST:
1686 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
1687 end_reg = 4 * pkt->count + start_reg - 4;
1688 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
1689 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
1690 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
1691 DRM_ERROR("bad SET_BOOL_CONST\n");
1695 case PACKET3_SET_LOOP_CONST:
1696 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
1697 end_reg = 4 * pkt->count + start_reg - 4;
1698 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
1699 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
1700 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
1701 DRM_ERROR("bad SET_LOOP_CONST\n");
1705 case PACKET3_SET_CTL_CONST:
1706 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
1707 end_reg = 4 * pkt->count + start_reg - 4;
1708 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
1709 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
1710 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
1711 DRM_ERROR("bad SET_CTL_CONST\n");
1715 case PACKET3_SET_SAMPLER:
1716 if (pkt->count % 3) {
1717 DRM_ERROR("bad SET_SAMPLER\n");
1720 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
1721 end_reg = 4 * pkt->count + start_reg - 4;
1722 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
1723 (start_reg >= PACKET3_SET_SAMPLER_END) ||
1724 (end_reg >= PACKET3_SET_SAMPLER_END)) {
1725 DRM_ERROR("bad SET_SAMPLER\n");
1729 case PACKET3_SURFACE_BASE_UPDATE:
1730 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
1731 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
1735 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
1742 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1748 int r600_cs_parse(struct radeon_cs_parser *p)
1750 struct radeon_cs_packet pkt;
1751 struct r600_cs_track *track;
1754 if (p->track == NULL) {
1755 /* initialize tracker, we are in kms */
1756 track = kzalloc(sizeof(*track), GFP_KERNEL);
1759 r600_cs_track_init(track);
1760 if (p->rdev->family < CHIP_RV770) {
1761 track->npipes = p->rdev->config.r600.tiling_npipes;
1762 track->nbanks = p->rdev->config.r600.tiling_nbanks;
1763 track->group_size = p->rdev->config.r600.tiling_group_size;
1764 } else if (p->rdev->family <= CHIP_RV740) {
1765 track->npipes = p->rdev->config.rv770.tiling_npipes;
1766 track->nbanks = p->rdev->config.rv770.tiling_nbanks;
1767 track->group_size = p->rdev->config.rv770.tiling_group_size;
1772 r = r600_cs_packet_parse(p, &pkt, p->idx);
1778 p->idx += pkt.count + 2;
1781 r = r600_cs_parse_packet0(p, &pkt);
1786 r = r600_packet3_check(p, &pkt);
1789 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1799 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1801 for (r = 0; r < p->ib->length_dw; r++) {
1802 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
1811 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
1813 if (p->chunk_relocs_idx == -1) {
1816 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
1817 if (p->relocs == NULL) {
1824 * r600_cs_parser_fini() - clean parser states
1825 * @parser: parser structure holding parsing context.
1826 * @error: error number
1828 * If error is set then unvalidate the buffer, otherwise just free the memory
1829 * used by the parsing context.
1831 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
1835 kfree(parser->relocs);
1836 for (i = 0; i < parser->nchunks; i++) {
1837 kfree(parser->chunks[i].kdata);
1838 kfree(parser->chunks[i].kpage[0]);
1839 kfree(parser->chunks[i].kpage[1]);
1841 kfree(parser->chunks);
1842 kfree(parser->chunks_array);
1845 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
1846 unsigned family, u32 *ib, int *l)
1848 struct radeon_cs_parser parser;
1849 struct radeon_cs_chunk *ib_chunk;
1850 struct radeon_ib fake_ib;
1851 struct r600_cs_track *track;
1854 /* initialize tracker */
1855 track = kzalloc(sizeof(*track), GFP_KERNEL);
1858 r600_cs_track_init(track);
1859 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
1860 /* initialize parser */
1861 memset(&parser, 0, sizeof(struct radeon_cs_parser));
1863 parser.dev = &dev->pdev->dev;
1865 parser.family = family;
1866 parser.ib = &fake_ib;
1867 parser.track = track;
1869 r = radeon_cs_parser_init(&parser, data);
1871 DRM_ERROR("Failed to initialize parser !\n");
1872 r600_cs_parser_fini(&parser, r);
1875 r = r600_cs_parser_relocs_legacy(&parser);
1877 DRM_ERROR("Failed to parse relocation !\n");
1878 r600_cs_parser_fini(&parser, r);
1881 /* Copy the packet into the IB, the parser will read from the
1882 * input memory (cached) and write to the IB (which can be uncached). */
1884 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
1885 parser.ib->length_dw = ib_chunk->length_dw;
1886 *l = parser.ib->length_dw;
1887 r = r600_cs_parse(&parser);
1889 DRM_ERROR("Invalid command stream !\n");
1890 r600_cs_parser_fini(&parser, r);
1893 r = radeon_cs_finish_pages(&parser);
1895 DRM_ERROR("Invalid command stream !\n");
1896 r600_cs_parser_fini(&parser, r);
1899 r600_cs_parser_fini(&parser, r);
1903 void r600_cs_legacy_init(void)
1905 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;