diff --git a/src/gallium/drivers/panfrost/pan_csf.c b/src/gallium/drivers/panfrost/pan_csf.c
index d5cd8aa3673..9bc3e90e8ed 100644
--- a/src/gallium/drivers/panfrost/pan_csf.c
+++ b/src/gallium/drivers/panfrost/pan_csf.c
@@ -142,16 +142,20 @@ csf_oom_handler_init(struct panfrost_context *ctx)
    cs_load32_to(&b, counter, tiler_oom_ctx, FIELD_OFFSET(counter));
    cs_wait_slot(&b, 0, false);
    cs_if(&b, MALI_CS_CONDITION_GREATER, counter) {
-      cs_load64_to(&b, cs_reg64(&b, 40), tiler_oom_ctx, FBD_OFFSET(MIDDLE));
+      cs_load64_to(&b, cs_reg64(&b, MALI_FRAGMENT_SR_FBD_POINTER),
+                   tiler_oom_ctx, FBD_OFFSET(MIDDLE));
    }
    cs_else(&b) {
-      cs_load64_to(&b, cs_reg64(&b, 40), tiler_oom_ctx, FBD_OFFSET(FIRST));
+      cs_load64_to(&b, cs_reg64(&b, MALI_FRAGMENT_SR_FBD_POINTER),
+                   tiler_oom_ctx, FBD_OFFSET(FIRST));
    }
-   cs_load32_to(&b, cs_reg32(&b, 42), tiler_oom_ctx, FIELD_OFFSET(bbox_min));
-   cs_load32_to(&b, cs_reg32(&b, 43), tiler_oom_ctx, FIELD_OFFSET(bbox_max));
-   cs_move64_to(&b, cs_reg64(&b, 44), 0);
-   cs_move32_to(&b, cs_reg32(&b, 46), 0);
+   cs_load32_to(&b, cs_reg32(&b, MALI_FRAGMENT_SR_BBOX_MIN), tiler_oom_ctx,
+                FIELD_OFFSET(bbox_min));
+   cs_load32_to(&b, cs_reg32(&b, MALI_FRAGMENT_SR_BBOX_MAX), tiler_oom_ctx,
+                FIELD_OFFSET(bbox_max));
+   cs_move64_to(&b, cs_reg64(&b, MALI_FRAGMENT_SR_TEM_POINTER), 0);
+   cs_move32_to(&b, cs_reg32(&b, MALI_FRAGMENT_SR_TEM_ROW_STRIDE), 0);
    cs_wait_slot(&b, 0, false);
 
    /* Run the fragment job and wait */
@@ -809,12 +813,14 @@ GENX(csf_emit_fragment_job)(struct panfrost_batch *batch,
    }
 
    /* Set up the fragment job */
-   cs_move64_to(b, cs_reg64(b, 40), batch->framebuffer.gpu);
-   cs_move32_to(b, cs_reg32(b, 42), (batch->miny << 16) | batch->minx);
-   cs_move32_to(b, cs_reg32(b, 43),
+   cs_move64_to(b, cs_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
+                batch->framebuffer.gpu);
+   cs_move32_to(b, cs_reg32(b, MALI_FRAGMENT_SR_BBOX_MIN),
+                (batch->miny << 16) | batch->minx);
+   cs_move32_to(b, cs_reg32(b, MALI_FRAGMENT_SR_BBOX_MAX),
                 ((batch->maxy - 1) << 16) | (batch->maxx - 1));
-   cs_move64_to(b, cs_reg64(b, 44), 0);
-   cs_move32_to(b, cs_reg32(b, 46), 0);
+   cs_move64_to(b, cs_reg64(b, MALI_FRAGMENT_SR_TEM_POINTER), 0);
+   cs_move32_to(b, cs_reg32(b, MALI_FRAGMENT_SR_TEM_ROW_STRIDE), 0);
 
    /* Use different framebuffer descriptor if incremental rendering was
     * triggered while tiling */
@@ -823,7 +829,8 @@ GENX(csf_emit_fragment_job)(struct panfrost_batch *batch,
       cs_load32_to(b, counter, cs_reg64(b, TILER_OOM_CTX_REG), 0);
       cs_wait_slot(b, 0, false);
       cs_if(b, MALI_CS_CONDITION_GREATER, counter) {
-         cs_move64_to(b, cs_reg64(b, 40), GET_FBD(oom_ctx, LAST).gpu);
+         cs_move64_to(b, cs_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
+                      GET_FBD(oom_ctx, LAST).gpu);
       }
    }
diff --git a/src/panfrost/lib/genxml/decode_csf.c b/src/panfrost/lib/genxml/decode_csf.c
index 5dbf52320ff..f465e3cf23e 100644
--- a/src/panfrost/lib/genxml/decode_csf.c
+++ b/src/panfrost/lib/genxml/decode_csf.c
@@ -770,11 +770,12 @@ pandecode_run_fragment(struct pandecode_context *ctx, FILE *fp,
 
    ctx->indent++;
 
-   DUMP_CL(ctx, SCISSOR, &qctx->regs[42], "Scissor\n");
+   DUMP_CL(ctx, SCISSOR, &qctx->regs[MALI_FRAGMENT_SR_BBOX_MIN], "Scissor\n");
 
    /* TODO: Tile enable map */
    GENX(pandecode_fbd)
-   (ctx, cs_get_u64(qctx, 40) & ~0x3full, true, qctx->gpu_id);
+   (ctx, cs_get_u64(qctx, MALI_FRAGMENT_SR_FBD_POINTER) & ~0x3full, true,
+    qctx->gpu_id);
 
    ctx->indent--;
 }
diff --git a/src/panfrost/lib/genxml/v10.xml b/src/panfrost/lib/genxml/v10.xml
index e97d6d98625..1d9abdda7e7 100644
--- a/src/panfrost/lib/genxml/v10.xml
+++ b/src/panfrost/lib/genxml/v10.xml
@@ -907,6 +907,14 @@
+
+
+
+
+
+
+
+
diff --git a/src/panfrost/vulkan/csf/panvk_vX_cmd_draw.c b/src/panfrost/vulkan/csf/panvk_vX_cmd_draw.c
index 59d29837fa2..0a50e896aa2 100644
--- a/src/panfrost/vulkan/csf/panvk_vX_cmd_draw.c
+++ b/src/panfrost/vulkan/csf/panvk_vX_cmd_draw.c
@@ -1061,7 +1061,8 @@ get_fb_descs(struct panvk_cmd_buffer *cmdbuf)
    if (copy_fbds) {
       struct cs_index cur_tiler = cs_sr_reg64(b, 38);
-      struct cs_index dst_fbd_ptr = cs_sr_reg64(b, 40);
+      struct cs_index dst_fbd_ptr =
+         cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER);
       struct cs_index layer_count = cs_sr_reg32(b, 47);
       struct cs_index src_fbd_ptr = cs_sr_reg64(b, 48);
       struct cs_index remaining_layers_in_td = cs_sr_reg32(b, 50);
@@ -1144,7 +1145,8 @@ get_fb_descs(struct panvk_cmd_buffer *cmdbuf)
       }
    } else {
       cs_update_frag_ctx(b) {
-         cs_move64_to(b, cs_sr_reg64(b, 40), fbds.gpu | fbd_flags);
+         cs_move64_to(b, cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
+                      fbds.gpu | fbd_flags);
          cs_move64_to(b, cs_sr_reg64(b, 38), cmdbuf->state.gfx.render.tiler);
       }
    }
@@ -2268,17 +2270,17 @@ setup_tiler_oom_ctx(struct panvk_cmd_buffer *cmdbuf)
               TILER_OOM_CTX_FIELD_OFFSET(counter));
 
    struct cs_index fbd_first = cs_scratch_reg64(b, 2);
-   cs_add64(b, fbd_first, cs_sr_reg64(b, 40),
+   cs_add64(b, fbd_first, cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
             (1 + PANVK_IR_FIRST_PASS) * fbd_ir_pass_offset);
    cs_store64(b, fbd_first, cs_subqueue_ctx_reg(b),
               TILER_OOM_CTX_FBDPTR_OFFSET(FIRST));
 
    struct cs_index fbd_middle = cs_scratch_reg64(b, 4);
-   cs_add64(b, fbd_middle, cs_sr_reg64(b, 40),
+   cs_add64(b, fbd_middle, cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
             (1 + PANVK_IR_MIDDLE_PASS) * fbd_ir_pass_offset);
    cs_store64(b, fbd_middle, cs_subqueue_ctx_reg(b),
               TILER_OOM_CTX_FBDPTR_OFFSET(MIDDLE));
 
    struct cs_index fbd_last = cs_scratch_reg64(b, 6);
-   cs_add64(b, fbd_last, cs_sr_reg64(b, 40),
+   cs_add64(b, fbd_last, cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
            (1 + PANVK_IR_LAST_PASS) * fbd_ir_pass_offset);
    cs_store64(b, fbd_last, cs_subqueue_ctx_reg(b),
               TILER_OOM_CTX_FBDPTR_OFFSET(LAST));
@@ -2382,7 +2384,8 @@ issue_fragment_jobs(struct panvk_cmd_buffer *cmdbuf)
    cs_wait_slot(b, SB_ID(LS), false);
    cs_if(b, MALI_CS_CONDITION_GREATER, counter)
       cs_update_frag_ctx(b)
-         cs_add64(b, cs_sr_reg64(b, 40), cs_sr_reg64(b, 40),
+         cs_add64(b, cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
+                  cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
                   (1 + PANVK_IR_LAST_PASS) * fbd_ir_pass_offset);
 
    /* Applications tend to forget to describe subpass dependencies, especially
@@ -2408,7 +2411,8 @@ issue_fragment_jobs(struct panvk_cmd_buffer *cmdbuf)
          cs_add32(b, layer_count, layer_count, -1);
 
          cs_update_frag_ctx(b)
-            cs_add64(b, cs_sr_reg64(b, 40), cs_sr_reg64(b, 40), fbd_sz);
+            cs_add64(b, cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER),
+                     cs_sr_reg64(b, MALI_FRAGMENT_SR_FBD_POINTER), fbd_sz);
       }
    } else {
       cs_trace_run_fragment(b, tracing_ctx, cs_scratch_reg_tuple(b, 0, 4),
diff --git a/src/panfrost/vulkan/csf/panvk_vX_exception_handler.c b/src/panfrost/vulkan/csf/panvk_vX_exception_handler.c
index f6474516d81..6b83dbc44cd 100644
--- a/src/panfrost/vulkan/csf/panvk_vX_exception_handler.c
+++ b/src/panfrost/vulkan/csf/panvk_vX_exception_handler.c
@@ -66,7 +66,7 @@ generate_tiler_oom_handler(struct cs_buffer handler_mem, bool has_zs_ext,
    /* The tiler pointer is pre-filled. */
    struct cs_index tiler_ptr = cs_sr_reg64(&b, 38);
-   struct cs_index fbd_ptr = cs_sr_reg64(&b, 40);
+   struct cs_index fbd_ptr = cs_sr_reg64(&b, MALI_FRAGMENT_SR_FBD_POINTER);
 
    /* Use different framebuffer descriptor depending on whether incremental
    * rendering has already been triggered */
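
Note on the v10.xml hunk: the eight added lines lost their content in this copy of the patch, but they most likely define the genxml enum that generates the MALI_FRAGMENT_SR_* constants used throughout the C changes above. The sketch below is a reconstruction, assuming the usual genxml <enum>/<value> syntax; the register numbers (40, 42, 43, 44, 46) and the generated identifiers are taken from the replaced code, while the enum and value name spellings are assumptions.

  <!-- Reconstructed sketch, not the verbatim v10.xml addition -->
  <enum name="Fragment SR">                      <!-- name assumed; must generate the MALI_FRAGMENT_SR_ prefix -->
    <value name="FBD pointer" value="40"/>       <!-- MALI_FRAGMENT_SR_FBD_POINTER -->
    <value name="BBox min" value="42"/>          <!-- MALI_FRAGMENT_SR_BBOX_MIN -->
    <value name="BBox max" value="43"/>          <!-- MALI_FRAGMENT_SR_BBOX_MAX -->
    <value name="TEM pointer" value="44"/>       <!-- MALI_FRAGMENT_SR_TEM_POINTER -->
    <value name="TEM row stride" value="46"/>    <!-- MALI_FRAGMENT_SR_TEM_ROW_STRIDE -->
  </enum>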