2 * Copyright 2010 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
30 #include "evergreend.h"
31 #include "evergreen_reg_safe.h"
32 #include "cayman_reg_safe.h"
34 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc);
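/*
 * The tracker below shadows the color/depth/stencil buffer state that the
 * command stream programs, so the checker can later validate that the
 * bound buffer objects are large enough.  The 12-entry arrays cover
 * CB_COLOR0-11, while FMASK/CMASK state only exists for CB_COLOR0-7,
 * hence the 8-entry arrays.  (Summary inferred from the fields and their
 * users below.)
 */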
37 struct evergreen_cs_track {
43 u32 cb_color_base_last[12];
44 struct radeon_bo *cb_color_bo[12];
45 u32 cb_color_bo_offset[12];
46 struct radeon_bo *cb_color_fmask_bo[8];
47 struct radeon_bo *cb_color_cmask_bo[8];
48 u32 cb_color_info[12];
49 u32 cb_color_view[12];
50 u32 cb_color_pitch_idx[12];
51 u32 cb_color_slice_idx[12];
52 u32 cb_color_dim_idx[12];
54 u32 cb_color_pitch[12];
55 u32 cb_color_slice[12];
56 u32 cb_color_cmask_slice[8];
57 u32 cb_color_fmask_slice[8];
60 u32 vgt_strmout_config;
61 u32 vgt_strmout_buffer_config;
65 u32 db_depth_size_idx;
69 u32 db_z_write_offset;
70 struct radeon_bo *db_z_read_bo;
71 struct radeon_bo *db_z_write_bo;
75 u32 db_s_write_offset;
76 struct radeon_bo *db_s_read_bo;
77 struct radeon_bo *db_s_write_bo;
80 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
84 for (i = 0; i < 8; i++) {
85 track->cb_color_fmask_bo[i] = NULL;
86 track->cb_color_cmask_bo[i] = NULL;
87 track->cb_color_cmask_slice[i] = 0;
88 track->cb_color_fmask_slice[i] = 0;
91 for (i = 0; i < 12; i++) {
92 track->cb_color_base_last[i] = 0;
93 track->cb_color_bo[i] = NULL;
94 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
95 track->cb_color_info[i] = 0;
96 track->cb_color_view[i] = 0;
97 track->cb_color_pitch_idx[i] = 0;
98 track->cb_color_slice_idx[i] = 0;
99 track->cb_color_dim_idx[i] = 0;
100 track->cb_color_pitch[i] = 0;
101 track->cb_color_slice[i] = 0;
102 track->cb_color_dim[i] = 0;
104 track->cb_target_mask = 0xFFFFFFFF;
105 track->cb_shader_mask = 0xFFFFFFFF;
107 track->db_depth_view = 0xFFFFC000;
108 track->db_depth_size = 0xFFFFFFFF;
109 track->db_depth_size_idx = 0;
110 track->db_depth_control = 0xFFFFFFFF;
111 track->db_z_info = 0xFFFFFFFF;
112 track->db_z_idx = 0xFFFFFFFF;
113 track->db_z_read_offset = 0xFFFFFFFF;
114 track->db_z_write_offset = 0xFFFFFFFF;
115 track->db_z_read_bo = NULL;
116 track->db_z_write_bo = NULL;
117 track->db_s_info = 0xFFFFFFFF;
118 track->db_s_idx = 0xFFFFFFFF;
119 track->db_s_read_offset = 0xFFFFFFFF;
120 track->db_s_write_offset = 0xFFFFFFFF;
121 track->db_s_read_bo = NULL;
122 track->db_s_write_bo = NULL;
125 static int evergreen_cs_track_check(struct radeon_cs_parser *p)
127 struct evergreen_cs_track *track = p->track;
129 /* we don't support stream out buffer yet */
130 if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
131 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
140 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
141 * @p: parser structure holding parsing context.
142 * @pkt: where to store packet information
144 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
145 * if the packet is bigger than the remaining ib size or if the packet is unknown.
147 int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
148 struct radeon_cs_packet *pkt,
151 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
154 if (idx >= ib_chunk->length_dw) {
155 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
156 idx, ib_chunk->length_dw);
159 header = radeon_get_ib_value(p, idx);
161 pkt->type = CP_PACKET_GET_TYPE(header);
162 pkt->count = CP_PACKET_GET_COUNT(header);
166 pkt->reg = CP_PACKET0_GET_REG(header);
169 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
175 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
178 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
179 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
180 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
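/*
 * For reference, the 32-bit PM4 header decoded above is laid out roughly
 * as follows (an informal sketch based on the CP_PACKET_GET_* macros, not
 * authoritative documentation):
 *
 *   bits 31:30  packet type
 *   bits 29:16  count (payload dwords minus one)
 *   bits 15:0   type 0: starting register dword offset
 *   bits 15:8   type 3: opcode
 */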
187 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
188 * @p: parser structure holding parsing context.
189 * @cs_reloc: where to store the resulting relocation information
194 * Check that the next packet is a relocation packet3, do bo validation and
195 * return the corresponding relocation so the caller can patch in the GPU offset.
197 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
198 struct radeon_cs_reloc **cs_reloc)
200 struct radeon_cs_chunk *relocs_chunk;
201 struct radeon_cs_packet p3reloc;
205 if (p->chunk_relocs_idx == -1) {
206 DRM_ERROR("No relocation chunk !\n");
210 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
211 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
215 p->idx += p3reloc.count + 2;
216 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
217 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
221 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
222 if (idx >= relocs_chunk->length_dw) {
223 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
224 idx, relocs_chunk->length_dw);
227 /* FIXME: we assume reloc size is 4 dwords */
228 *cs_reloc = p->relocs_ptr[(idx / 4)];
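/*
 * The dword read from the NOP payload above is an index into the
 * relocation chunk; with the 4-dwords-per-reloc assumption noted in the
 * FIXME, dividing it by 4 yields the slot in the relocs_ptr[] array.
 */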
233 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
234 * @p: parser structure holding parsing context.
236 * Userspace sends a special sequence for VLINE waits.
237 * PACKET0 - VLINE_START_END + value
238 * PACKET3 - WAIT_REG_MEM poll vline status reg
239 * RELOC (P3) - crtc_id in reloc.
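 * (Roughly: the PACKET0 write takes two dwords, the WAIT_REG_MEM takes
 * seven (a header plus six payload dwords), and the crtc_id sits in the
 * payload of the trailing relocation NOP, which is what the
 * h_idx + 2 + 7 + 1 arithmetic below relies on.)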
241 * This function parses the sequence and relocates the VLINE_START_END
242 * and WAIT_REG_MEM packets to the correct crtc.
243 * It also detects a switched off crtc and nulls out the wait in that case.
246 static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
248 struct drm_mode_object *obj;
249 struct drm_crtc *crtc;
250 struct radeon_crtc *radeon_crtc;
251 struct radeon_cs_packet p3reloc, wait_reg_mem;
254 uint32_t header, h_idx, reg, wait_reg_mem_info;
255 volatile uint32_t *ib;
259 /* parse the WAIT_REG_MEM */
260 r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
264 /* check it's a WAIT_REG_MEM */
265 if (wait_reg_mem.type != PACKET_TYPE3 ||
266 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
267 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
271 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
272 /* bit 4 is reg (0) or mem (1) */
273 if (wait_reg_mem_info & 0x10) {
274 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
277 /* waiting for value to be equal */
278 if ((wait_reg_mem_info & 0x7) != 0x3) {
279 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
282 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
283 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
287 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
288 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
292 /* jump over the NOP */
293 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
298 p->idx += wait_reg_mem.count + 2;
299 p->idx += p3reloc.count + 2;
301 header = radeon_get_ib_value(p, h_idx);
302 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
303 reg = CP_PACKET0_GET_REG(header);
304 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
306 DRM_ERROR("cannot find crtc %d\n", crtc_id);
309 crtc = obj_to_crtc(obj);
310 radeon_crtc = to_radeon_crtc(crtc);
311 crtc_id = radeon_crtc->crtc_id;
313 if (!crtc->enabled) {
314 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
315 ib[h_idx + 2] = PACKET2(0);
316 ib[h_idx + 3] = PACKET2(0);
317 ib[h_idx + 4] = PACKET2(0);
318 ib[h_idx + 5] = PACKET2(0);
319 ib[h_idx + 6] = PACKET2(0);
320 ib[h_idx + 7] = PACKET2(0);
321 ib[h_idx + 8] = PACKET2(0);
324 case EVERGREEN_VLINE_START_END:
325 header &= ~R600_CP_PACKET0_REG_MASK;
326 header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
328 ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
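/*
 * Both registers are rebased by crtc_offset so the wait targets the crtc
 * userspace actually rendered to, and the >> 2 converts the byte address
 * into the dword register offset that PACKET0 and WAIT_REG_MEM expect
 * (mirroring the << 2 comparison done in the checks above).
 */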
331 DRM_ERROR("unknown crtc reloc\n");
338 static int evergreen_packet0_check(struct radeon_cs_parser *p,
339 struct radeon_cs_packet *pkt,
340 unsigned idx, unsigned reg)
345 case EVERGREEN_VLINE_START_END:
346 r = evergreen_cs_packet_parse_vline(p);
348 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
354 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
361 static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
362 struct radeon_cs_packet *pkt)
370 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
371 r = evergreen_packet0_check(p, pkt, idx, reg);
380 * evergreen_cs_check_reg() - check if register is authorized or not
381 * @p: parser structure holding parsing context
382 * @reg: register we are testing
383 * @idx: index into the cs buffer
385 * This function will test against evergreen_reg_safe_bm and return 0
386 * if the register is safe. If the register is not flagged as safe this function
387 * will test it against a list of registers needing special handling.
389 static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
391 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
392 struct radeon_cs_reloc *reloc;
397 if (p->rdev->family >= CHIP_CAYMAN)
398 last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
400 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
404 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
407 m = 1 << ((reg >> 2) & 31);
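/*
 * The *_reg_safe_bm tables pack one bit per register dword; the word
 * index is derived from the upper bits of the register offset just above,
 * and m selects this register's bit within that 32-bit word.  A clear bit
 * lets the register through without further checks, while a set bit
 * routes it to the special-case switch below.
 */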
408 if (p->rdev->family >= CHIP_CAYMAN) {
409 if (!(cayman_reg_safe_bm[i] & m))
412 if (!(evergreen_reg_safe_bm[i] & m))
417 /* force the following regs to 0 in an attempt to disable the output buffer;
418 * we need to better understand how it works before we can perform a
419 * security check on it (Jerome)
421 case SQ_ESGS_RING_SIZE:
422 case SQ_GSVS_RING_SIZE:
423 case SQ_ESTMP_RING_SIZE:
424 case SQ_GSTMP_RING_SIZE:
425 case SQ_HSTMP_RING_SIZE:
426 case SQ_LSTMP_RING_SIZE:
427 case SQ_PSTMP_RING_SIZE:
428 case SQ_VSTMP_RING_SIZE:
429 case SQ_ESGS_RING_ITEMSIZE:
430 case SQ_ESTMP_RING_ITEMSIZE:
431 case SQ_GSTMP_RING_ITEMSIZE:
432 case SQ_GSVS_RING_ITEMSIZE:
433 case SQ_GS_VERT_ITEMSIZE:
434 case SQ_GS_VERT_ITEMSIZE_1:
435 case SQ_GS_VERT_ITEMSIZE_2:
436 case SQ_GS_VERT_ITEMSIZE_3:
437 case SQ_GSVS_RING_OFFSET_1:
438 case SQ_GSVS_RING_OFFSET_2:
439 case SQ_GSVS_RING_OFFSET_3:
440 case SQ_HSTMP_RING_ITEMSIZE:
441 case SQ_LSTMP_RING_ITEMSIZE:
442 case SQ_PSTMP_RING_ITEMSIZE:
443 case SQ_VSTMP_RING_ITEMSIZE:
444 case VGT_TF_RING_SIZE:
445 /* get value to populate the IB, don't remove */
446 /*tmp =radeon_get_ib_value(p, idx);
449 case SQ_ESGS_RING_BASE:
450 case SQ_GSVS_RING_BASE:
451 case SQ_ESTMP_RING_BASE:
452 case SQ_GSTMP_RING_BASE:
453 case SQ_HSTMP_RING_BASE:
454 case SQ_LSTMP_RING_BASE:
455 case SQ_PSTMP_RING_BASE:
456 case SQ_VSTMP_RING_BASE:
457 r = evergreen_cs_packet_next_reloc(p, &reloc);
459 dev_warn(p->dev, "bad SET_CONTEXT_REG "
463 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
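/*
 * These ring base registers appear to be programmed in 256-byte units,
 * hence the relocated GPU offset is shifted down by 8 bits before being
 * added into the IB; the CB/DB base registers below use the same
 * convention.
 */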
465 case DB_DEPTH_CONTROL:
466 track->db_depth_control = radeon_get_ib_value(p, idx);
469 if (p->rdev->family < CHIP_CAYMAN) {
470 dev_warn(p->dev, "bad SET_CONTEXT_REG "
475 case CAYMAN_DB_DEPTH_INFO:
476 if (p->rdev->family < CHIP_CAYMAN) {
477 dev_warn(p->dev, "bad SET_CONTEXT_REG "
483 track->db_z_info = radeon_get_ib_value(p, idx);
484 if (!p->keep_tiling_flags) {
485 r = evergreen_cs_packet_next_reloc(p, &reloc);
487 dev_warn(p->dev, "bad SET_CONTEXT_REG "
491 ib[idx] &= ~Z_ARRAY_MODE(0xf);
492 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
493 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
494 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
495 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
497 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
498 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
502 case DB_STENCIL_INFO:
503 track->db_s_info = radeon_get_ib_value(p, idx);
506 track->db_depth_view = radeon_get_ib_value(p, idx);
509 track->db_depth_size = radeon_get_ib_value(p, idx);
510 track->db_depth_size_idx = idx;
513 r = evergreen_cs_packet_next_reloc(p, &reloc);
515 dev_warn(p->dev, "bad SET_CONTEXT_REG "
519 track->db_z_read_offset = radeon_get_ib_value(p, idx);
520 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
521 track->db_z_read_bo = reloc->robj;
523 case DB_Z_WRITE_BASE:
524 r = evergreen_cs_packet_next_reloc(p, &reloc);
526 dev_warn(p->dev, "bad SET_CONTEXT_REG "
530 track->db_z_write_offset = radeon_get_ib_value(p, idx);
531 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
532 track->db_z_write_bo = reloc->robj;
534 case DB_STENCIL_READ_BASE:
535 r = evergreen_cs_packet_next_reloc(p, &reloc);
537 dev_warn(p->dev, "bad SET_CONTEXT_REG "
541 track->db_s_read_offset = radeon_get_ib_value(p, idx);
542 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
543 track->db_s_read_bo = reloc->robj;
545 case DB_STENCIL_WRITE_BASE:
546 r = evergreen_cs_packet_next_reloc(p, &reloc);
548 dev_warn(p->dev, "bad SET_CONTEXT_REG "
552 track->db_s_write_offset = radeon_get_ib_value(p, idx);
553 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
554 track->db_s_write_bo = reloc->robj;
556 case VGT_STRMOUT_CONFIG:
557 track->vgt_strmout_config = radeon_get_ib_value(p, idx);
559 case VGT_STRMOUT_BUFFER_CONFIG:
560 track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
563 track->cb_target_mask = radeon_get_ib_value(p, idx);
566 track->cb_shader_mask = radeon_get_ib_value(p, idx);
568 case PA_SC_AA_CONFIG:
569 if (p->rdev->family >= CHIP_CAYMAN) {
570 dev_warn(p->dev, "bad SET_CONTEXT_REG "
574 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
575 track->nsamples = 1 << tmp;
577 case CAYMAN_PA_SC_AA_CONFIG:
578 if (p->rdev->family < CHIP_CAYMAN) {
579 dev_warn(p->dev, "bad SET_CONTEXT_REG "
583 tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
584 track->nsamples = 1 << tmp;
594 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
595 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
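/*
 * The CB_COLOR0-7 register blocks are 0x3c bytes apart, while the
 * CB_COLOR8-11 blocks added on evergreen are only 0x1c bytes apart
 * (apparently because they lack the FMASK/CMASK registers, note the
 * 8-entry arrays in the tracker).  That is why the index math in these
 * cases divides by 0x3c for the first eight targets and by 0x1c, plus 8,
 * for the last four.
 */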
599 case CB_COLOR10_VIEW:
600 case CB_COLOR11_VIEW:
601 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
602 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
612 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
613 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
614 if (!p->keep_tiling_flags) {
615 r = evergreen_cs_packet_next_reloc(p, &reloc);
617 dev_warn(p->dev, "bad SET_CONTEXT_REG "
621 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
624 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
625 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
626 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
632 case CB_COLOR10_INFO:
633 case CB_COLOR11_INFO:
634 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
635 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
636 if (!p->keep_tiling_flags) {
637 r = evergreen_cs_packet_next_reloc(p, &reloc);
639 dev_warn(p->dev, "bad SET_CONTEXT_REG "
643 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
644 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
645 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
646 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
647 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
648 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
652 case CB_COLOR0_PITCH:
653 case CB_COLOR1_PITCH:
654 case CB_COLOR2_PITCH:
655 case CB_COLOR3_PITCH:
656 case CB_COLOR4_PITCH:
657 case CB_COLOR5_PITCH:
658 case CB_COLOR6_PITCH:
659 case CB_COLOR7_PITCH:
660 tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
661 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
662 track->cb_color_pitch_idx[tmp] = idx;
664 case CB_COLOR8_PITCH:
665 case CB_COLOR9_PITCH:
666 case CB_COLOR10_PITCH:
667 case CB_COLOR11_PITCH:
668 tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
669 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
670 track->cb_color_pitch_idx[tmp] = idx;
672 case CB_COLOR0_SLICE:
673 case CB_COLOR1_SLICE:
674 case CB_COLOR2_SLICE:
675 case CB_COLOR3_SLICE:
676 case CB_COLOR4_SLICE:
677 case CB_COLOR5_SLICE:
678 case CB_COLOR6_SLICE:
679 case CB_COLOR7_SLICE:
680 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
681 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
682 track->cb_color_slice_idx[tmp] = idx;
684 case CB_COLOR8_SLICE:
685 case CB_COLOR9_SLICE:
686 case CB_COLOR10_SLICE:
687 case CB_COLOR11_SLICE:
688 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
689 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
690 track->cb_color_slice_idx[tmp] = idx;
692 case CB_COLOR0_ATTRIB:
693 case CB_COLOR1_ATTRIB:
694 case CB_COLOR2_ATTRIB:
695 case CB_COLOR3_ATTRIB:
696 case CB_COLOR4_ATTRIB:
697 case CB_COLOR5_ATTRIB:
698 case CB_COLOR6_ATTRIB:
699 case CB_COLOR7_ATTRIB:
700 case CB_COLOR8_ATTRIB:
701 case CB_COLOR9_ATTRIB:
702 case CB_COLOR10_ATTRIB:
703 case CB_COLOR11_ATTRIB:
713 tmp = (reg - CB_COLOR0_DIM) / 0x3c;
714 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
715 track->cb_color_dim_idx[tmp] = idx;
721 tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
722 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
723 track->cb_color_dim_idx[tmp] = idx;
725 case CB_COLOR0_FMASK:
726 case CB_COLOR1_FMASK:
727 case CB_COLOR2_FMASK:
728 case CB_COLOR3_FMASK:
729 case CB_COLOR4_FMASK:
730 case CB_COLOR5_FMASK:
731 case CB_COLOR6_FMASK:
732 case CB_COLOR7_FMASK:
733 tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
734 r = evergreen_cs_packet_next_reloc(p, &reloc);
736 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
739 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
740 track->cb_color_fmask_bo[tmp] = reloc->robj;
742 case CB_COLOR0_CMASK:
743 case CB_COLOR1_CMASK:
744 case CB_COLOR2_CMASK:
745 case CB_COLOR3_CMASK:
746 case CB_COLOR4_CMASK:
747 case CB_COLOR5_CMASK:
748 case CB_COLOR6_CMASK:
749 case CB_COLOR7_CMASK:
750 tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
751 r = evergreen_cs_packet_next_reloc(p, &reloc);
753 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
756 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
757 track->cb_color_cmask_bo[tmp] = reloc->robj;
759 case CB_COLOR0_FMASK_SLICE:
760 case CB_COLOR1_FMASK_SLICE:
761 case CB_COLOR2_FMASK_SLICE:
762 case CB_COLOR3_FMASK_SLICE:
763 case CB_COLOR4_FMASK_SLICE:
764 case CB_COLOR5_FMASK_SLICE:
765 case CB_COLOR6_FMASK_SLICE:
766 case CB_COLOR7_FMASK_SLICE:
767 tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
768 track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
770 case CB_COLOR0_CMASK_SLICE:
771 case CB_COLOR1_CMASK_SLICE:
772 case CB_COLOR2_CMASK_SLICE:
773 case CB_COLOR3_CMASK_SLICE:
774 case CB_COLOR4_CMASK_SLICE:
775 case CB_COLOR5_CMASK_SLICE:
776 case CB_COLOR6_CMASK_SLICE:
777 case CB_COLOR7_CMASK_SLICE:
778 tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
779 track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
789 r = evergreen_cs_packet_next_reloc(p, &reloc);
791 dev_warn(p->dev, "bad SET_CONTEXT_REG "
795 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
796 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
797 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
798 track->cb_color_base_last[tmp] = ib[idx];
799 track->cb_color_bo[tmp] = reloc->robj;
803 case CB_COLOR10_BASE:
804 case CB_COLOR11_BASE:
805 r = evergreen_cs_packet_next_reloc(p, &reloc);
807 dev_warn(p->dev, "bad SET_CONTEXT_REG "
811 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
812 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
813 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
814 track->cb_color_base_last[tmp] = ib[idx];
815 track->cb_color_bo[tmp] = reloc->robj;
827 case CB_IMMED10_BASE:
828 case CB_IMMED11_BASE:
829 case DB_HTILE_DATA_BASE:
830 case SQ_PGM_START_FS:
831 case SQ_PGM_START_ES:
832 case SQ_PGM_START_VS:
833 case SQ_PGM_START_GS:
834 case SQ_PGM_START_PS:
835 case SQ_PGM_START_HS:
836 case SQ_PGM_START_LS:
837 case SQ_CONST_MEM_BASE:
838 case SQ_ALU_CONST_CACHE_GS_0:
839 case SQ_ALU_CONST_CACHE_GS_1:
840 case SQ_ALU_CONST_CACHE_GS_2:
841 case SQ_ALU_CONST_CACHE_GS_3:
842 case SQ_ALU_CONST_CACHE_GS_4:
843 case SQ_ALU_CONST_CACHE_GS_5:
844 case SQ_ALU_CONST_CACHE_GS_6:
845 case SQ_ALU_CONST_CACHE_GS_7:
846 case SQ_ALU_CONST_CACHE_GS_8:
847 case SQ_ALU_CONST_CACHE_GS_9:
848 case SQ_ALU_CONST_CACHE_GS_10:
849 case SQ_ALU_CONST_CACHE_GS_11:
850 case SQ_ALU_CONST_CACHE_GS_12:
851 case SQ_ALU_CONST_CACHE_GS_13:
852 case SQ_ALU_CONST_CACHE_GS_14:
853 case SQ_ALU_CONST_CACHE_GS_15:
854 case SQ_ALU_CONST_CACHE_PS_0:
855 case SQ_ALU_CONST_CACHE_PS_1:
856 case SQ_ALU_CONST_CACHE_PS_2:
857 case SQ_ALU_CONST_CACHE_PS_3:
858 case SQ_ALU_CONST_CACHE_PS_4:
859 case SQ_ALU_CONST_CACHE_PS_5:
860 case SQ_ALU_CONST_CACHE_PS_6:
861 case SQ_ALU_CONST_CACHE_PS_7:
862 case SQ_ALU_CONST_CACHE_PS_8:
863 case SQ_ALU_CONST_CACHE_PS_9:
864 case SQ_ALU_CONST_CACHE_PS_10:
865 case SQ_ALU_CONST_CACHE_PS_11:
866 case SQ_ALU_CONST_CACHE_PS_12:
867 case SQ_ALU_CONST_CACHE_PS_13:
868 case SQ_ALU_CONST_CACHE_PS_14:
869 case SQ_ALU_CONST_CACHE_PS_15:
870 case SQ_ALU_CONST_CACHE_VS_0:
871 case SQ_ALU_CONST_CACHE_VS_1:
872 case SQ_ALU_CONST_CACHE_VS_2:
873 case SQ_ALU_CONST_CACHE_VS_3:
874 case SQ_ALU_CONST_CACHE_VS_4:
875 case SQ_ALU_CONST_CACHE_VS_5:
876 case SQ_ALU_CONST_CACHE_VS_6:
877 case SQ_ALU_CONST_CACHE_VS_7:
878 case SQ_ALU_CONST_CACHE_VS_8:
879 case SQ_ALU_CONST_CACHE_VS_9:
880 case SQ_ALU_CONST_CACHE_VS_10:
881 case SQ_ALU_CONST_CACHE_VS_11:
882 case SQ_ALU_CONST_CACHE_VS_12:
883 case SQ_ALU_CONST_CACHE_VS_13:
884 case SQ_ALU_CONST_CACHE_VS_14:
885 case SQ_ALU_CONST_CACHE_VS_15:
886 case SQ_ALU_CONST_CACHE_HS_0:
887 case SQ_ALU_CONST_CACHE_HS_1:
888 case SQ_ALU_CONST_CACHE_HS_2:
889 case SQ_ALU_CONST_CACHE_HS_3:
890 case SQ_ALU_CONST_CACHE_HS_4:
891 case SQ_ALU_CONST_CACHE_HS_5:
892 case SQ_ALU_CONST_CACHE_HS_6:
893 case SQ_ALU_CONST_CACHE_HS_7:
894 case SQ_ALU_CONST_CACHE_HS_8:
895 case SQ_ALU_CONST_CACHE_HS_9:
896 case SQ_ALU_CONST_CACHE_HS_10:
897 case SQ_ALU_CONST_CACHE_HS_11:
898 case SQ_ALU_CONST_CACHE_HS_12:
899 case SQ_ALU_CONST_CACHE_HS_13:
900 case SQ_ALU_CONST_CACHE_HS_14:
901 case SQ_ALU_CONST_CACHE_HS_15:
902 case SQ_ALU_CONST_CACHE_LS_0:
903 case SQ_ALU_CONST_CACHE_LS_1:
904 case SQ_ALU_CONST_CACHE_LS_2:
905 case SQ_ALU_CONST_CACHE_LS_3:
906 case SQ_ALU_CONST_CACHE_LS_4:
907 case SQ_ALU_CONST_CACHE_LS_5:
908 case SQ_ALU_CONST_CACHE_LS_6:
909 case SQ_ALU_CONST_CACHE_LS_7:
910 case SQ_ALU_CONST_CACHE_LS_8:
911 case SQ_ALU_CONST_CACHE_LS_9:
912 case SQ_ALU_CONST_CACHE_LS_10:
913 case SQ_ALU_CONST_CACHE_LS_11:
914 case SQ_ALU_CONST_CACHE_LS_12:
915 case SQ_ALU_CONST_CACHE_LS_13:
916 case SQ_ALU_CONST_CACHE_LS_14:
917 case SQ_ALU_CONST_CACHE_LS_15:
918 r = evergreen_cs_packet_next_reloc(p, &reloc);
920 dev_warn(p->dev, "bad SET_CONTEXT_REG "
924 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
926 case SX_MEMORY_EXPORT_BASE:
927 if (p->rdev->family >= CHIP_CAYMAN) {
928 dev_warn(p->dev, "bad SET_CONFIG_REG "
932 r = evergreen_cs_packet_next_reloc(p, &reloc);
934 dev_warn(p->dev, "bad SET_CONFIG_REG "
938 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
940 case CAYMAN_SX_SCATTER_EXPORT_BASE:
941 if (p->rdev->family < CHIP_CAYMAN) {
942 dev_warn(p->dev, "bad SET_CONTEXT_REG "
946 r = evergreen_cs_packet_next_reloc(p, &reloc);
948 dev_warn(p->dev, "bad SET_CONTEXT_REG "
952 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
955 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
962 * evergreen_check_texture_resource() - check that a texture resource is valid
963 * @p: parser structure holding parsing context
964 * @idx: index into the cs buffer
965 * @texture: texture's bo structure
966 * @mipmap: mipmap's bo structure
968 * This function will check that the resource has valid fields and that
969 * the texture and mipmap bo objects are big enough to cover this resource.
971 static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
972 struct radeon_bo *texture,
973 struct radeon_bo *mipmap)
979 static int evergreen_packet3_check(struct radeon_cs_parser *p,
980 struct radeon_cs_packet *pkt)
982 struct radeon_cs_reloc *reloc;
983 struct evergreen_cs_track *track;
987 unsigned start_reg, end_reg, reg;
991 track = (struct evergreen_cs_track *)p->track;
994 idx_value = radeon_get_ib_value(p, idx);
996 switch (pkt->opcode) {
997 case PACKET3_SET_PREDICATION:
1001 if (pkt->count != 1) {
1002 DRM_ERROR("bad SET PREDICATION\n");
1006 tmp = radeon_get_ib_value(p, idx + 1);
1007 pred_op = (tmp >> 16) & 0x7;
1009 /* for the clear predicate operation */
1014 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1018 r = evergreen_cs_packet_next_reloc(p, &reloc);
1020 DRM_ERROR("bad SET PREDICATION\n");
1024 ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1025 ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
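/*
 * The predicate source address is split across two payload dwords: the
 * low 32 bits of the relocated GPU offset go into the first, and only
 * the low 8 bits of the upper half are kept in the second, matching the
 * width of the address field in this packet.
 */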
1028 case PACKET3_CONTEXT_CONTROL:
1029 if (pkt->count != 1) {
1030 DRM_ERROR("bad CONTEXT_CONTROL\n");
1034 case PACKET3_INDEX_TYPE:
1035 case PACKET3_NUM_INSTANCES:
1036 case PACKET3_CLEAR_STATE:
1038 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1042 case CAYMAN_PACKET3_DEALLOC_STATE:
1043 if (p->rdev->family < CHIP_CAYMAN) {
1044 DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
1048 DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
1052 case PACKET3_INDEX_BASE:
1053 if (pkt->count != 1) {
1054 DRM_ERROR("bad INDEX_BASE\n");
1057 r = evergreen_cs_packet_next_reloc(p, &reloc);
1059 DRM_ERROR("bad INDEX_BASE\n");
1062 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1063 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1064 r = evergreen_cs_track_check(p);
1066 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1070 case PACKET3_DRAW_INDEX:
1071 if (pkt->count != 3) {
1072 DRM_ERROR("bad DRAW_INDEX\n");
1075 r = evergreen_cs_packet_next_reloc(p, &reloc);
1077 DRM_ERROR("bad DRAW_INDEX\n");
1080 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1081 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1082 r = evergreen_cs_track_check(p);
1084 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1088 case PACKET3_DRAW_INDEX_2:
1089 if (pkt->count != 4) {
1090 DRM_ERROR("bad DRAW_INDEX_2\n");
1093 r = evergreen_cs_packet_next_reloc(p, &reloc);
1095 DRM_ERROR("bad DRAW_INDEX_2\n");
1098 ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1099 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1100 r = evergreen_cs_track_check(p);
1102 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1106 case PACKET3_DRAW_INDEX_AUTO:
1107 if (pkt->count != 1) {
1108 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1111 r = evergreen_cs_track_check(p);
1113 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1117 case PACKET3_DRAW_INDEX_MULTI_AUTO:
1118 if (pkt->count != 2) {
1119 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
1122 r = evergreen_cs_track_check(p);
1124 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1128 case PACKET3_DRAW_INDEX_IMMD:
1129 if (pkt->count < 2) {
1130 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1133 r = evergreen_cs_track_check(p);
1135 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1139 case PACKET3_DRAW_INDEX_OFFSET:
1140 if (pkt->count != 2) {
1141 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
1144 r = evergreen_cs_track_check(p);
1146 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1150 case PACKET3_DRAW_INDEX_OFFSET_2:
1151 if (pkt->count != 3) {
1152 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
1155 r = evergreen_cs_track_check(p);
1157 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1161 case PACKET3_DISPATCH_DIRECT:
1162 if (pkt->count != 3) {
1163 DRM_ERROR("bad DISPATCH_DIRECT\n");
1166 r = evergreen_cs_track_check(p);
1168 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1172 case PACKET3_DISPATCH_INDIRECT:
1173 if (pkt->count != 1) {
1174 DRM_ERROR("bad DISPATCH_INDIRECT\n");
1177 r = evergreen_cs_packet_next_reloc(p, &reloc);
1179 DRM_ERROR("bad DISPATCH_INDIRECT\n");
1182 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1183 r = evergreen_cs_track_check(p);
1185 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1189 case PACKET3_WAIT_REG_MEM:
1190 if (pkt->count != 5) {
1191 DRM_ERROR("bad WAIT_REG_MEM\n");
1194 /* bit 4 is reg (0) or mem (1) */
1195 if (idx_value & 0x10) {
1196 r = evergreen_cs_packet_next_reloc(p, &reloc);
1198 DRM_ERROR("bad WAIT_REG_MEM\n");
1201 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1202 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1205 case PACKET3_SURFACE_SYNC:
1206 if (pkt->count != 3) {
1207 DRM_ERROR("bad SURFACE_SYNC\n");
1210 /* 0xffffffff/0x0 is flush all cache flag */
1211 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1212 radeon_get_ib_value(p, idx + 2) != 0) {
1213 r = evergreen_cs_packet_next_reloc(p, &reloc);
1215 DRM_ERROR("bad SURFACE_SYNC\n");
1218 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1221 case PACKET3_EVENT_WRITE:
1222 if (pkt->count != 2 && pkt->count != 0) {
1223 DRM_ERROR("bad EVENT_WRITE\n");
1227 r = evergreen_cs_packet_next_reloc(p, &reloc);
1229 DRM_ERROR("bad EVENT_WRITE\n");
1232 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1233 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1236 case PACKET3_EVENT_WRITE_EOP:
1237 if (pkt->count != 4) {
1238 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1241 r = evergreen_cs_packet_next_reloc(p, &reloc);
1243 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1246 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1247 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1249 case PACKET3_EVENT_WRITE_EOS:
1250 if (pkt->count != 3) {
1251 DRM_ERROR("bad EVENT_WRITE_EOS\n");
1254 r = evergreen_cs_packet_next_reloc(p, &reloc);
1256 DRM_ERROR("bad EVENT_WRITE_EOS\n");
1259 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1260 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1262 case PACKET3_SET_CONFIG_REG:
1263 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
1264 end_reg = 4 * pkt->count + start_reg - 4;
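/*
 * The first payload dword of a SET_*_REG packet is a register dword
 * offset from the block's base, so start_reg is that offset times four
 * plus the base, and end_reg is the byte offset of the last register
 * the packet writes; both must fall inside the block, which is what the
 * range check below enforces.
 */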
1265 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
1266 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1267 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1268 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1271 for (i = 0; i < pkt->count; i++) {
1272 reg = start_reg + (4 * i);
1273 r = evergreen_cs_check_reg(p, reg, idx+1+i);
1278 case PACKET3_SET_CONTEXT_REG:
1279 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
1280 end_reg = 4 * pkt->count + start_reg - 4;
1281 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
1282 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1283 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1284 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1287 for (i = 0; i < pkt->count; i++) {
1288 reg = start_reg + (4 * i);
1289 r = evergreen_cs_check_reg(p, reg, idx+1+i);
1294 case PACKET3_SET_RESOURCE:
1295 if (pkt->count % 8) {
1296 DRM_ERROR("bad SET_RESOURCE\n");
1299 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
1300 end_reg = 4 * pkt->count + start_reg - 4;
1301 if ((start_reg < PACKET3_SET_RESOURCE_START) ||
1302 (start_reg >= PACKET3_SET_RESOURCE_END) ||
1303 (end_reg >= PACKET3_SET_RESOURCE_END)) {
1304 DRM_ERROR("bad SET_RESOURCE\n");
1307 for (i = 0; i < (pkt->count / 8); i++) {
1308 struct radeon_bo *texture, *mipmap;
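/*
 * Each SET_RESOURCE entry is eight dwords, and dword 7 carries the SQ
 * constant type; the switch below uses it to tell texture resources
 * (two relocations, texture then mipmap) from vertex buffer resources
 * (one relocation plus a size clamp).
 */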
1311 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
1312 case SQ_TEX_VTX_VALID_TEXTURE:
1314 r = evergreen_cs_packet_next_reloc(p, &reloc);
1316 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1319 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1320 if (!p->keep_tiling_flags) {
1321 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1322 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
1323 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1324 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
1326 texture = reloc->robj;
1328 r = evergreen_cs_packet_next_reloc(p, &reloc);
1330 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1333 ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1334 mipmap = reloc->robj;
1335 r = evergreen_check_texture_resource(p, idx+1+(i*8),
1340 case SQ_TEX_VTX_VALID_BUFFER:
1342 r = evergreen_cs_packet_next_reloc(p, &reloc);
1344 DRM_ERROR("bad SET_RESOURCE (vtx)\n");
1347 offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
1348 size = radeon_get_ib_value(p, idx+1+(i*8)+1);
1349 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1350 /* force size to size of the buffer */
1351 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1352 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
1354 ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
1355 ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1357 case SQ_TEX_VTX_INVALID_TEXTURE:
1358 case SQ_TEX_VTX_INVALID_BUFFER:
1360 DRM_ERROR("bad SET_RESOURCE\n");
1365 case PACKET3_SET_ALU_CONST:
1366 /* XXX fix me ALU const buffers only */
1368 case PACKET3_SET_BOOL_CONST:
1369 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
1370 end_reg = 4 * pkt->count + start_reg - 4;
1371 if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
1372 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
1373 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
1374 DRM_ERROR("bad SET_BOOL_CONST\n");
1378 case PACKET3_SET_LOOP_CONST:
1379 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
1380 end_reg = 4 * pkt->count + start_reg - 4;
1381 if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
1382 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
1383 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
1384 DRM_ERROR("bad SET_LOOP_CONST\n");
1388 case PACKET3_SET_CTL_CONST:
1389 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
1390 end_reg = 4 * pkt->count + start_reg - 4;
1391 if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
1392 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
1393 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
1394 DRM_ERROR("bad SET_CTL_CONST\n");
1398 case PACKET3_SET_SAMPLER:
1399 if (pkt->count % 3) {
1400 DRM_ERROR("bad SET_SAMPLER\n");
1403 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
1404 end_reg = 4 * pkt->count + start_reg - 4;
1405 if ((start_reg < PACKET3_SET_SAMPLER_START) ||
1406 (start_reg >= PACKET3_SET_SAMPLER_END) ||
1407 (end_reg >= PACKET3_SET_SAMPLER_END)) {
1408 DRM_ERROR("bad SET_SAMPLER\n");
1415 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1421 int evergreen_cs_parse(struct radeon_cs_parser *p)
1423 struct radeon_cs_packet pkt;
1424 struct evergreen_cs_track *track;
1427 if (p->track == NULL) {
1428 /* initialize tracker, we are in kms */
1429 track = kzalloc(sizeof(*track), GFP_KERNEL);
1432 evergreen_cs_track_init(track);
1433 track->npipes = p->rdev->config.evergreen.tiling_npipes;
1434 track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
1435 track->group_size = p->rdev->config.evergreen.tiling_group_size;
1439 r = evergreen_cs_packet_parse(p, &pkt, p->idx);
1445 p->idx += pkt.count + 2;
1448 r = evergreen_cs_parse_packet0(p, &pkt);
1453 r = evergreen_packet3_check(p, &pkt);
1456 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1466 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1468 for (r = 0; r < p->ib->length_dw; r++) {
1469 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);