radeonsi: Initial geometry shader support

Partly based on the corresponding r600g work by Vadim Girlin and Dave
Airlie.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Author:    Michel Dänzer
Date:      2013-11-21 16:45:28 +09:00
Committer: Michel Dänzer
Commit:    404b29d765 (parent 51f89a03e1)

11 changed files with 1086 additions and 193 deletions
@@ -67,6 +67,7 @@
#define R600_CONTEXT_PS_PARTIAL_FLUSH (1 << 16)
#define R600_CONTEXT_WAIT_3D_IDLE (1 << 17)
#define R600_CONTEXT_WAIT_CP_DMA_IDLE (1 << 18)
#define R600_CONTEXT_VGT_FLUSH (1 << 19)
/* Debug flags. */
/* logging */
@@ -1426,4 +1426,5 @@ void radeon_llvm_dispose(struct radeon_llvm_context * ctx)
LLVMDisposeModule(ctx->soa.bld_base.base.gallivm->module);
LLVMContextDispose(ctx->soa.bld_base.base.gallivm->context);
FREE(ctx->temps);
ctx->temps = NULL;
}
@@ -57,6 +57,7 @@ static void si_blitter_begin(struct pipe_context *ctx, enum si_blitter_op op)
util_blitter_save_stencil_ref(sctx->blitter, &sctx->stencil_ref);
util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
util_blitter_save_fragment_shader(sctx->blitter, sctx->ps_shader);
util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader);
util_blitter_save_vertex_shader(sctx->blitter, sctx->vs_shader);
util_blitter_save_vertex_elements(sctx->blitter, sctx->vertex_elements);
if (sctx->queued.named.viewport) {
@@ -242,6 +242,8 @@ static void si_emit_descriptors(struct si_context *sctx,
static unsigned si_get_shader_user_data_base(unsigned shader)
{
switch (shader) {
case SI_SHADER_EXPORT:
return R_00B330_SPI_SHADER_USER_DATA_ES_0;
case PIPE_SHADER_VERTEX:
return R_00B130_SPI_SHADER_USER_DATA_VS_0;
case PIPE_SHADER_GEOMETRY:
@@ -489,6 +491,100 @@ static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint s
si_update_descriptors(sctx, &buffers->desc);
}
/* RING BUFFERS */
void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
struct pipe_constant_buffer *input,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
unsigned element_size, unsigned index_stride)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
if (shader >= SI_NUM_SHADERS)
return;
/* The stride field in the resource descriptor has 14 bits */
assert(stride < (1 << 14));
slot += NUM_PIPE_CONST_BUFFERS + 1;
assert(slot < buffers->num_buffers);
pipe_resource_reference(&buffers->buffers[slot], NULL);
if (input && input->buffer) {
uint64_t va;
va = r600_resource_va(ctx->screen, input->buffer);
switch (element_size) {
default:
assert(!"Unsupported ring buffer element size");
case 0:
case 2:
element_size = 0;
break;
case 4:
element_size = 1;
break;
case 8:
element_size = 2;
break;
case 16:
element_size = 3;
break;
}
switch (index_stride) {
default:
assert(!"Unsupported ring buffer index stride");
case 0:
case 8:
index_stride = 0;
break;
case 16:
index_stride = 1;
break;
case 32:
index_stride = 2;
break;
case 64:
index_stride = 3;
break;
}
/* Set the descriptor. */
uint32_t *desc = buffers->desc_data[slot];
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
S_008F04_STRIDE(stride) |
S_008F04_SWIZZLE_ENABLE(swizzle);
desc[2] = num_records;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
S_008F0C_ELEMENT_SIZE(element_size) |
S_008F0C_INDEX_STRIDE(index_stride) |
S_008F0C_ADD_TID_ENABLE(add_tid);
pipe_resource_reference(&buffers->buffers[slot], input->buffer);
r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
(struct r600_resource*)input->buffer,
buffers->shader_usage);
buffers->desc.enabled_mask |= 1 << slot;
} else {
/* Clear the descriptor. */
memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
buffers->desc.enabled_mask &= ~(1 << slot);
}
buffers->desc.dirty_mask |= 1 << slot;
si_update_descriptors(sctx, &buffers->desc);
}
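/* A worked example of the encoding above, taken from the si_init_gs_rings()
 * call added later in this patch: the ESGS ring is bound for the export (ES)
 * stage with stride = 0, add_tid = true, swizzle = true, element_size = 4 and
 * index_stride = 64, so element_size is encoded as 1 and index_stride as 3
 * before being packed into desc[3], while desc[1] gets
 * S_008F04_SWIZZLE_ENABLE(1) with a zero stride. */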
/* STREAMOUT BUFFERS */
static void si_set_streamout_targets(struct pipe_context *ctx,
@@ -825,7 +921,7 @@ void si_init_all_descriptors(struct si_context *sctx)
for (i = 0; i < SI_NUM_SHADERS; i++) {
si_init_buffer_resources(sctx, &sctx->const_buffers[i],
NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
RADEON_USAGE_READ);
RADEON_USAGE_READWRITE);
si_init_sampler_views(sctx, &sctx->samplers[i].views, i);
@@ -83,6 +83,16 @@ static void si_destroy_context(struct pipe_context *context)
pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
r600_resource_reference(&sctx->border_color_table, NULL);
if (sctx->gs_on) {
si_pm4_free_state(sctx, sctx->gs_on, 0);
}
if (sctx->gs_off) {
si_pm4_free_state(sctx, sctx->gs_off, 0);
}
if (sctx->gs_rings) {
si_pm4_free_state(sctx, sctx->gs_rings, 0);
}
if (sctx->dummy_pixel_shader) {
sctx->b.b.delete_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);
}
@@ -75,7 +75,9 @@ struct si_surface {
};
#define SI_NUM_ATOMS(sctx) (sizeof((sctx)->atoms)/sizeof((sctx)->atoms.array[0]))
#define SI_NUM_SHADERS (PIPE_SHADER_FRAGMENT+1)
#define SI_SHADER_EXPORT (PIPE_SHADER_GEOMETRY+1)
#define SI_NUM_SHADERS (SI_SHADER_EXPORT+1)
struct si_context {
struct r600_common_context b;
@@ -112,6 +114,7 @@ struct si_context {
/* for saving when using blitter */
struct pipe_stencil_ref stencil_ref;
struct si_pipe_shader_selector *ps_shader;
struct si_pipe_shader_selector *gs_shader;
struct si_pipe_shader_selector *vs_shader;
struct si_cs_shader_state cs_shader_state;
/* shader information */
@@ -138,8 +141,13 @@ struct si_context {
/* With rasterizer discard, there doesn't have to be a pixel shader.
* In that case, we bind this one: */
void *dummy_pixel_shader;
struct si_pm4_state *gs_on;
struct si_pm4_state *gs_off;
struct si_pm4_state *gs_rings;
struct r600_atom cache_flush;
struct pipe_constant_buffer null_const_buf; /* used for set_constant_buffer(NULL) on CIK */
struct pipe_constant_buffer esgs_ring;
struct pipe_constant_buffer gsvs_ring;
/* SI state handling */
union si_state queued;
@@ -45,13 +45,23 @@
#include <errno.h>
struct si_shader_output_values
{
LLVMValueRef values[4];
unsigned name;
unsigned index;
unsigned usage;
};
struct si_shader_context
{
struct radeon_llvm_context radeon_bld;
struct tgsi_parse_context parse;
struct tgsi_token * tokens;
struct si_pipe_shader *shader;
struct si_shader *gs_for_vs;
unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
unsigned gs_next_vertex;
int param_streamout_config;
int param_streamout_write_index;
int param_streamout_offset[4];
@@ -87,6 +97,16 @@ static struct si_shader_context * si_shader_context(
#define LOCAL_ADDR_SPACE 3
#define USER_SGPR_ADDR_SPACE 8
#define SENDMSG_GS 2
#define SENDMSG_GS_DONE 3
#define SENDMSG_GS_OP_NOP (0 << 4)
#define SENDMSG_GS_OP_CUT (1 << 4)
#define SENDMSG_GS_OP_EMIT (2 << 4)
#define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
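For reference, a short worked sketch of how these defines combine into the message value passed to llvm.SI.sendmsg in the GS code added below:

SENDMSG_GS_OP_EMIT | SENDMSG_GS      = (2 << 4) | 2 = 0x22   /* emit vertex */
SENDMSG_GS_OP_CUT  | SENDMSG_GS      = (1 << 4) | 2 = 0x12   /* end primitive */
SENDMSG_GS_OP_NOP  | SENDMSG_GS_DONE = (0 << 4) | 3 = 0x03   /* GS epilogue */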
/**
* Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
*
@@ -200,6 +220,82 @@ static void declare_input_vs(
}
}
static void declare_input_gs(
struct radeon_llvm_context *radeon_bld,
unsigned input_index,
const struct tgsi_full_declaration *decl)
{
/* Nothing to do, inputs are handled in fetch_input_gs() below */
}
static LLVMValueRef fetch_input_gs(
struct lp_build_tgsi_context *bld_base,
const struct tgsi_full_src_register *reg,
enum tgsi_opcode_type type,
unsigned swizzle)
{
struct lp_build_context *base = &bld_base->base;
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct lp_build_context *uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
struct gallivm_state *gallivm = base->gallivm;
LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
LLVMValueRef vtx_offset;
LLVMValueRef t_list_ptr;
LLVMValueRef t_list;
LLVMValueRef args[9];
unsigned vtx_offset_param;
if (!reg->Register.Dimension)
return NULL;
if (swizzle == ~0) {
LLVMValueRef values[TGSI_NUM_CHANNELS];
unsigned chan;
for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
values[chan] = fetch_input_gs(bld_base, reg, type, chan);
}
return lp_build_gather_values(bld_base->base.gallivm, values,
TGSI_NUM_CHANNELS);
}
/* Get the vertex offset parameter */
vtx_offset_param = reg->Dimension.Index;
if (vtx_offset_param < 2) {
vtx_offset_param += SI_PARAM_VTX0_OFFSET;
} else {
assert(vtx_offset_param < 6);
vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
}
vtx_offset = lp_build_mul_imm(uint,
LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
vtx_offset_param),
4);
/* Load the ESGS ring resource descriptor */
t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
lp_build_const_int32(gallivm,
NUM_PIPE_CONST_BUFFERS + 1));
args[0] = t_list;
args[1] = vtx_offset;
args[2] = lp_build_const_int32(gallivm,
((reg->Register.Index * 4) + swizzle) * 256);
args[3] = uint->zero;
args[4] = uint->one; /* OFFEN */
args[5] = uint->zero; /* IDXEN */
args[6] = uint->one; /* GLC */
args[7] = uint->zero; /* SLC */
args[8] = uint->zero; /* TFE */
return LLVMBuildBitCast(gallivm->builder,
build_intrinsic(gallivm->builder,
"llvm.SI.buffer.load.dword.i32.i32",
i32, args, 9,
LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
tgsi2llvmtype(bld_base, type), "");
}
static void declare_input_fs(
struct radeon_llvm_context *radeon_bld,
unsigned input_index,
@@ -458,8 +554,7 @@ static LLVMValueRef fetch_constant(
/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
struct tgsi_full_declaration *d,
unsigned index,
LLVMValueRef *values,
unsigned target,
LLVMValueRef *args)
{
@@ -490,12 +585,8 @@ static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
if (compressed) {
/* Pixel shader needs to pack output values before export */
for (chan = 0; chan < 2; chan++ ) {
LLVMValueRef *out_ptr =
si_shader_ctx->radeon_bld.soa.outputs[index];
args[0] = LLVMBuildLoad(base->gallivm->builder,
out_ptr[2 * chan], "");
args[1] = LLVMBuildLoad(base->gallivm->builder,
out_ptr[2 * chan + 1], "");
args[0] = values[2 * chan];
args[1] = values[2 * chan + 1];
args[chan + 5] =
build_intrinsic(base->gallivm->builder,
"llvm.SI.packf16",
@@ -512,14 +603,10 @@ static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
/* Set COMPR flag */
args[4] = uint->one;
} else {
for (chan = 0; chan < 4; chan++ ) {
LLVMValueRef out_ptr =
si_shader_ctx->radeon_bld.soa.outputs[index][chan];
for (chan = 0; chan < 4; chan++ )
/* +5 because the first output value will be
* the 6th argument to the intrinsic. */
args[chan + 5] = LLVMBuildLoad(base->gallivm->builder,
out_ptr, "");
}
args[chan + 5] = values[chan];
/* Clear COMPR flag */
args[4] = uint->zero;
@@ -546,21 +633,36 @@ static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
* stage. */
}
/* Load from output pointers and initialize arguments for the shader export intrinsic */
static void si_llvm_init_export_args_load(struct lp_build_tgsi_context *bld_base,
LLVMValueRef *out_ptr,
unsigned target,
LLVMValueRef *args)
{
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMValueRef values[4];
int i;
for (i = 0; i < 4; i++)
values[i] = LLVMBuildLoad(gallivm->builder, out_ptr[i], "");
si_llvm_init_export_args(bld_base, values, target, args);
}
static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
unsigned index)
LLVMValueRef *out_ptr)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
LLVMValueRef alpha_ref = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
SI_PARAM_ALPHA_REF);
LLVMValueRef alpha_pass =
lp_build_cmp(&bld_base->base,
si_shader_ctx->shader->key.ps.alpha_func,
LLVMBuildLoad(gallivm->builder, out_ptr, ""),
LLVMBuildLoad(gallivm->builder, out_ptr[3], ""),
alpha_ref);
LLVMValueRef arg =
lp_build_select(&bld_base->base,
@@ -580,19 +682,8 @@ static void si_alpha_test(struct lp_build_tgsi_context *bld_base,
}
}
static void si_alpha_to_one(struct lp_build_tgsi_context *bld_base,
unsigned index)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
/* set alpha to one */
LLVMBuildStore(bld_base->base.gallivm->builder,
bld_base->base.one,
si_shader_ctx->radeon_bld.soa.outputs[index][3]);
}
static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
LLVMValueRef (*pos)[9], unsigned index)
LLVMValueRef (*pos)[9], LLVMValueRef *out_elts)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct si_pipe_shader *shader = si_shader_ctx->shader;
@@ -601,17 +692,11 @@ static void si_llvm_emit_clipvertex(struct lp_build_tgsi_context * bld_base,
unsigned reg_index;
unsigned chan;
unsigned const_chan;
LLVMValueRef out_elts[4];
LLVMValueRef base_elt;
LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm, NUM_PIPE_CONST_BUFFERS);
LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, constbuf_index);
for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
out_elts[chan] = LLVMBuildLoad(base->gallivm->builder, out_ptr, "");
}
for (reg_index = 0; reg_index < 2; reg_index ++) {
LLVMValueRef *args = pos[2 + reg_index];
@@ -874,113 +959,104 @@ static int si_store_shader_io_attribs(struct si_shader *shader,
assert(i < Elements(shader->output));
shader->output[i].name = d->Semantic.Name;
shader->output[i].sid = d->Semantic.Index;
shader->output[i].interpolate = d->Interp.Interpolate;
shader->output[i].index = d->Range.First;
shader->output[i].usage = d->Declaration.UsageMask;
break;
}
return i;
}
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context * bld_base)
/* Generate export instructions for hardware VS shader stage */
static void si_llvm_export_vs(struct lp_build_tgsi_context *bld_base,
struct si_shader_output_values *outputs,
unsigned noutput)
{
struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
struct si_shader * shader = &si_shader_ctx->shader->shader;
struct lp_build_context * base = &bld_base->base;
struct lp_build_context * uint =
&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
struct tgsi_parse_context *parse = &si_shader_ctx->parse;
LLVMValueRef args[9];
LLVMValueRef pos_args[4][9] = { { 0 } };
unsigned semantic_name;
LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL;
unsigned semantic_name, semantic_index, semantic_usage;
unsigned target;
unsigned param_count = 0;
unsigned pos_idx;
int psize_index = -1, edgeflag_index = -1, layer_index = -1;
int i;
if (si_shader_ctx->shader->selector->so.num_outputs) {
si_llvm_emit_streamout(si_shader_ctx);
}
while (!tgsi_parse_end_of_tokens(parse)) {
struct tgsi_full_declaration *d =
&parse->FullToken.FullDeclaration;
unsigned target;
unsigned index;
for (i = 0; i < noutput; i++) {
semantic_name = outputs[i].name;
semantic_index = outputs[i].index;
semantic_usage = outputs[i].usage;
tgsi_parse_token(parse);
if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
continue;
i = si_store_shader_io_attribs(shader, d);
if (i < 0)
continue;
semantic_name = d->Semantic.Name;
handle_semantic:
for (index = d->Range.First; index <= d->Range.Last; index++) {
/* Select the correct target */
switch(semantic_name) {
case TGSI_SEMANTIC_PSIZE:
shader->vs_out_misc_write = true;
shader->vs_out_point_size = true;
psize_index = index;
/* Select the correct target */
switch(semantic_name) {
case TGSI_SEMANTIC_PSIZE:
shader->vs_out_misc_write = true;
shader->vs_out_point_size = true;
psize_value = outputs[i].values[0];
continue;
case TGSI_SEMANTIC_EDGEFLAG:
shader->vs_out_misc_write = true;
shader->vs_out_edgeflag = true;
edgeflag_value = outputs[i].values[0];
continue;
case TGSI_SEMANTIC_LAYER:
shader->vs_out_misc_write = true;
shader->vs_out_layer = true;
layer_value = outputs[i].values[0];
continue;
case TGSI_SEMANTIC_POSITION:
target = V_008DFC_SQ_EXP_POS;
break;
case TGSI_SEMANTIC_COLOR:
case TGSI_SEMANTIC_BCOLOR:
target = V_008DFC_SQ_EXP_PARAM + param_count;
shader->output[i].param_offset = param_count;
param_count++;
break;
case TGSI_SEMANTIC_CLIPDIST:
if (!(si_shader_ctx->shader->key.vs.ucps_enabled &
(1 << semantic_index)))
continue;
case TGSI_SEMANTIC_EDGEFLAG:
shader->vs_out_misc_write = true;
shader->vs_out_edgeflag = true;
edgeflag_index = index;
continue;
case TGSI_SEMANTIC_LAYER:
shader->vs_out_misc_write = true;
shader->vs_out_layer = true;
layer_index = index;
continue;
case TGSI_SEMANTIC_POSITION:
target = V_008DFC_SQ_EXP_POS;
break;
case TGSI_SEMANTIC_COLOR:
case TGSI_SEMANTIC_BCOLOR:
target = V_008DFC_SQ_EXP_PARAM + param_count;
shader->output[i].param_offset = param_count;
param_count++;
break;
case TGSI_SEMANTIC_CLIPDIST:
if (!(si_shader_ctx->shader->key.vs.ucps_enabled &
(1 << d->Semantic.Index)))
continue;
shader->clip_dist_write |=
d->Declaration.UsageMask << (d->Semantic.Index << 2);
target = V_008DFC_SQ_EXP_POS + 2 + d->Semantic.Index;
break;
case TGSI_SEMANTIC_CLIPVERTEX:
si_llvm_emit_clipvertex(bld_base, pos_args, index);
continue;
case TGSI_SEMANTIC_FOG:
case TGSI_SEMANTIC_GENERIC:
target = V_008DFC_SQ_EXP_PARAM + param_count;
shader->output[i].param_offset = param_count;
param_count++;
break;
default:
target = 0;
fprintf(stderr,
"Warning: SI unhandled vs output type:%d\n",
semantic_name);
}
shader->clip_dist_write |=
semantic_usage << (semantic_index << 2);
target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
break;
case TGSI_SEMANTIC_CLIPVERTEX:
si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
continue;
case TGSI_SEMANTIC_FOG:
case TGSI_SEMANTIC_GENERIC:
target = V_008DFC_SQ_EXP_PARAM + param_count;
shader->output[i].param_offset = param_count;
param_count++;
break;
default:
target = 0;
fprintf(stderr,
"Warning: SI unhandled vs output type:%d\n",
semantic_name);
}
si_llvm_init_export_args(bld_base, d, index, target, args);
si_llvm_init_export_args(bld_base, outputs[i].values, target, args);
if (target >= V_008DFC_SQ_EXP_POS &&
target <= (V_008DFC_SQ_EXP_POS + 3)) {
memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
args, sizeof(args));
} else {
lp_build_intrinsic(base->gallivm->builder,
"llvm.SI.export",
LLVMVoidTypeInContext(base->gallivm->context),
args, 9);
}
if (target >= V_008DFC_SQ_EXP_POS &&
target <= (V_008DFC_SQ_EXP_POS + 3)) {
memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
args, sizeof(args));
} else {
lp_build_intrinsic(base->gallivm->builder,
"llvm.SI.export",
LLVMVoidTypeInContext(base->gallivm->context),
args, 9);
}
if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
@@ -1017,31 +1093,27 @@ handle_semantic:
pos_args[1][7] = base->zero; /* Z */
pos_args[1][8] = base->zero; /* W */
if (shader->vs_out_point_size) {
pos_args[1][5] = LLVMBuildLoad(base->gallivm->builder,
si_shader_ctx->radeon_bld.soa.outputs[psize_index][0], "");
}
if (shader->vs_out_point_size)
pos_args[1][5] = psize_value;
if (shader->vs_out_edgeflag) {
LLVMValueRef output = LLVMBuildLoad(base->gallivm->builder,
si_shader_ctx->radeon_bld.soa.outputs[edgeflag_index][0], "");
/* The output is a float, but the hw expects an integer
* with the first bit containing the edge flag. */
output = LLVMBuildFPToUI(base->gallivm->builder, output,
bld_base->uint_bld.elem_type, "");
output = lp_build_min(&bld_base->int_bld, output, bld_base->int_bld.one);
edgeflag_value = LLVMBuildFPToUI(base->gallivm->builder,
edgeflag_value,
bld_base->uint_bld.elem_type, "");
edgeflag_value = lp_build_min(&bld_base->int_bld,
edgeflag_value,
bld_base->int_bld.one);
/* The LLVM intrinsic expects a float. */
pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder, output,
pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder,
edgeflag_value,
base->elem_type, "");
}
if (shader->vs_out_layer) {
pos_args[1][7] = LLVMBuildLoad(base->gallivm->builder,
si_shader_ctx->radeon_bld.soa.outputs[layer_index][0], "");
}
if (shader->vs_out_layer)
pos_args[1][7] = layer_value;
}
for (i = 0; i < 4; i++)
@@ -1067,6 +1139,116 @@ handle_semantic:
}
}
static void si_llvm_emit_es_epilogue(struct lp_build_tgsi_context * bld_base)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
struct si_pipe_shader *shader = si_shader_ctx->shader;
struct tgsi_parse_context *parse = &si_shader_ctx->parse;
LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
LLVMValueRef t_list_ptr;
LLVMValueRef t_list;
unsigned chan;
int i;
while (!tgsi_parse_end_of_tokens(parse)) {
struct tgsi_full_declaration *d =
&parse->FullToken.FullDeclaration;
tgsi_parse_token(parse);
if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
continue;
si_store_shader_io_attribs(&shader->shader, d);
}
/* Load the ESGS ring resource descriptor */
t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
lp_build_const_int32(gallivm,
NUM_PIPE_CONST_BUFFERS + 1));
for (i = 0; i < shader->shader.noutput; i++) {
LLVMValueRef *out_ptr =
si_shader_ctx->radeon_bld.soa.outputs[shader->shader.output[i].index];
for (chan = 0; chan < 4; chan++) {
LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
LLVMValueRef voffset =
lp_build_const_int32(gallivm, (4 * i + chan) * 4);
LLVMValueRef soffset =
LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
SI_PARAM_ES2GS_OFFSET);
out_val = LLVMBuildBitCast(gallivm->builder, out_val, i32, "");
build_tbuffer_store(si_shader_ctx, t_list, out_val, 1,
voffset, soffset, 0,
V_008F0C_BUF_DATA_FORMAT_32,
V_008F0C_BUF_NUM_FORMAT_UINT,
1, 0, 1, 1, 0);
}
}
}
static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMValueRef args[2];
args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
args[1] = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
LLVMVoidTypeInContext(gallivm->context), args, 2,
LLVMNoUnwindAttribute);
}
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context * bld_base)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
struct si_pipe_shader *shader = si_shader_ctx->shader;
struct tgsi_parse_context *parse = &si_shader_ctx->parse;
struct si_shader_output_values *outputs = NULL;
unsigned noutput = 0;
int i;
while (!tgsi_parse_end_of_tokens(parse)) {
struct tgsi_full_declaration *d =
&parse->FullToken.FullDeclaration;
unsigned index;
tgsi_parse_token(parse);
if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
continue;
i = si_store_shader_io_attribs(&shader->shader, d);
if (i < 0)
continue;
outputs = REALLOC(outputs, noutput * sizeof(outputs[0]),
(noutput + 1) * sizeof(outputs[0]));
for (index = d->Range.First; index <= d->Range.Last; index++) {
outputs[noutput].name = d->Semantic.Name;
outputs[noutput].index = d->Semantic.Index;
outputs[noutput].usage = d->Declaration.UsageMask;
for (i = 0; i < 4; i++)
outputs[noutput].values[i] =
LLVMBuildLoad(gallivm->builder,
si_shader_ctx->radeon_bld.soa.outputs[index][i],
"");
}
noutput++;
}
si_llvm_export_vs(bld_base, outputs, noutput);
FREE(outputs);
}
static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context * bld_base)
{
struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
@@ -1114,10 +1296,14 @@ static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context * bld_base)
case TGSI_SEMANTIC_COLOR:
target = V_008DFC_SQ_EXP_MRT + d->Semantic.Index;
if (si_shader_ctx->shader->key.ps.alpha_to_one)
si_alpha_to_one(bld_base, index);
LLVMBuildStore(bld_base->base.gallivm->builder,
bld_base->base.one,
si_shader_ctx->radeon_bld.soa.outputs[index][3]);
if (d->Semantic.Index == 0 &&
si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
si_alpha_test(bld_base, index);
si_alpha_test(bld_base,
si_shader_ctx->radeon_bld.soa.outputs[index]);
break;
default:
target = 0;
@@ -1126,7 +1312,9 @@ static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context * bld_base)
semantic_name);
}
si_llvm_init_export_args(bld_base, d, index, target, args);
si_llvm_init_export_args_load(bld_base,
si_shader_ctx->radeon_bld.soa.outputs[index],
target, args);
if (semantic_name == TGSI_SEMANTIC_COLOR) {
/* If there is an export instruction waiting to be emitted, do so now. */
@@ -1144,8 +1332,9 @@ static void si_llvm_emit_fs_epilogue(struct lp_build_tgsi_context * bld_base)
if (shader->fs_write_all && shader->output[i].sid == 0 &&
si_shader_ctx->shader->key.ps.nr_cbufs > 1) {
for (int c = 1; c < si_shader_ctx->shader->key.ps.nr_cbufs; c++) {
si_llvm_init_export_args(bld_base, d, index,
V_008DFC_SQ_EXP_MRT + c, args);
si_llvm_init_export_args_load(bld_base,
si_shader_ctx->radeon_bld.soa.outputs[index],
V_008DFC_SQ_EXP_MRT + c, args);
lp_build_intrinsic(base->gallivm->builder,
"llvm.SI.export",
LLVMVoidTypeInContext(base->gallivm->context),
@@ -1658,6 +1847,93 @@ static void si_llvm_emit_ddxy(
#endif /* HAVE_LLVM >= 0x0304 */
/* Emit one vertex from the geometry shader */
static void si_llvm_emit_vertex(
const struct lp_build_tgsi_action *action,
struct lp_build_tgsi_context *bld_base,
struct lp_build_emit_data *emit_data)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct si_shader *shader = &si_shader_ctx->shader->shader;
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
LLVMValueRef t_list_ptr;
LLVMValueRef t_list;
LLVMValueRef args[2];
unsigned chan;
int i;
/* Load the GSVS ring resource descriptor */
t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
lp_build_const_int32(gallivm,
NUM_PIPE_CONST_BUFFERS + 2));
if (shader->noutput == 0) {
struct tgsi_parse_context *parse = &si_shader_ctx->parse;
while (!tgsi_parse_end_of_tokens(parse)) {
tgsi_parse_token(parse);
if (parse->FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION)
si_store_shader_io_attribs(shader,
&parse->FullToken.FullDeclaration);
}
}
/* Write vertex attribute values to GSVS ring */
for (i = 0; i < shader->noutput; i++) {
LLVMValueRef *out_ptr =
si_shader_ctx->radeon_bld.soa.outputs[shader->output[i].index];
for (chan = 0; chan < 4; chan++) {
LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
LLVMValueRef voffset =
lp_build_const_int32(gallivm,
((i * 4 + chan) *
shader->gs_max_out_vertices +
si_shader_ctx->gs_next_vertex) * 4);
LLVMValueRef soffset =
LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
SI_PARAM_GS2VS_OFFSET);
out_val = LLVMBuildBitCast(gallivm->builder, out_val, i32, "");
build_tbuffer_store(si_shader_ctx, t_list, out_val, 1,
voffset, soffset, 0,
V_008F0C_BUF_DATA_FORMAT_32,
V_008F0C_BUF_NUM_FORMAT_UINT,
1, 0, 1, 1, 0);
}
}
si_shader_ctx->gs_next_vertex++;
/* Signal vertex emission */
args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS);
args[1] = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
LLVMVoidTypeInContext(gallivm->context), args, 2,
LLVMNoUnwindAttribute);
}
/* Cut one primitive from the geometry shader */
static void si_llvm_emit_primitive(
const struct lp_build_tgsi_action *action,
struct lp_build_tgsi_context *bld_base,
struct lp_build_emit_data *emit_data)
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMValueRef args[2];
/* Signal primitive cut */
args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS);
args[1] = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_GS_WAVE_ID);
build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
LLVMVoidTypeInContext(gallivm->context), args, 2,
LLVMNoUnwindAttribute);
}
static const struct lp_build_tgsi_action tex_action = {
.fetch_args = tex_fetch_args,
.emit = build_tex_intrinsic,
@@ -1712,6 +1988,7 @@ static void create_function(struct si_shader_context *si_shader_ctx)
{
struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
struct gallivm_state *gallivm = bld_base->base.gallivm;
struct si_pipe_shader *shader = si_shader_ctx->shader;
LLVMTypeRef params[21], f32, i8, i32, v2i32, v3i32;
unsigned i, last_sgpr, num_params;
@@ -1736,20 +2013,24 @@ static void create_function(struct si_shader_context *si_shader_ctx)
params[SI_PARAM_SO_BUFFER] = params[SI_PARAM_CONST];
params[SI_PARAM_START_INSTANCE] = i32;
num_params = SI_PARAM_START_INSTANCE+1;
if (shader->key.vs.as_es) {
params[SI_PARAM_ES2GS_OFFSET] = i32;
num_params++;
} else {
/* The locations of the other parameters are assigned dynamically. */
/* The locations of the other parameters are assigned dynamically. */
/* Streamout SGPRs. */
if (shader->selector->so.num_outputs) {
params[si_shader_ctx->param_streamout_config = num_params++] = i32;
params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
}
/* A streamout buffer offset is loaded if the stride is non-zero. */
for (i = 0; i < 4; i++) {
if (!shader->selector->so.stride[i])
continue;
/* Streamout SGPRs. */
if (si_shader_ctx->shader->selector->so.num_outputs) {
params[si_shader_ctx->param_streamout_config = num_params++] = i32;
params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
}
/* A streamout buffer offset is loaded if the stride is non-zero. */
for (i = 0; i < 4; i++) {
if (!si_shader_ctx->shader->selector->so.stride[i])
continue;
params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
}
}
last_sgpr = num_params-1;
@@ -1761,6 +2042,23 @@ static void create_function(struct si_shader_context *si_shader_ctx)
params[si_shader_ctx->param_instance_id = num_params++] = i32;
break;
case TGSI_PROCESSOR_GEOMETRY:
params[SI_PARAM_GS2VS_OFFSET] = i32;
params[SI_PARAM_GS_WAVE_ID] = i32;
last_sgpr = SI_PARAM_GS_WAVE_ID;
/* VGPRs */
params[SI_PARAM_VTX0_OFFSET] = i32;
params[SI_PARAM_VTX1_OFFSET] = i32;
params[SI_PARAM_PRIMITIVE_ID] = i32;
params[SI_PARAM_VTX2_OFFSET] = i32;
params[SI_PARAM_VTX3_OFFSET] = i32;
params[SI_PARAM_VTX4_OFFSET] = i32;
params[SI_PARAM_VTX5_OFFSET] = i32;
params[SI_PARAM_GS_INSTANCE_ID] = i32;
num_params = SI_PARAM_GS_INSTANCE_ID+1;
break;
case TGSI_PROCESSOR_FRAGMENT:
params[SI_PARAM_ALPHA_REF] = f32;
params[SI_PARAM_PRIM_MASK] = i32;
@@ -1812,8 +2110,9 @@ static void create_function(struct si_shader_context *si_shader_ctx)
}
#if HAVE_LLVM >= 0x0304
if (bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0)
if (bld_base->info &&
(bld_base->info->opcode_count[TGSI_OPCODE_DDX] > 0 ||
bld_base->info->opcode_count[TGSI_OPCODE_DDY] > 0))
si_shader_ctx->ddxy_lds =
LLVMAddGlobalInAddressSpace(gallivm->module,
LLVMArrayType(i32, 64),
@@ -1993,6 +2292,91 @@ int si_compile_llvm(struct si_context *sctx, struct si_pipe_shader *shader,
return 0;
}
/* Generate code for the hardware VS shader stage to go with a geometry shader */
static int si_generate_gs_copy_shader(struct si_context *sctx,
struct si_shader_context *si_shader_ctx,
bool dump)
{
struct gallivm_state *gallivm = &si_shader_ctx->radeon_bld.gallivm;
struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
struct lp_build_context *base = &bld_base->base;
struct lp_build_context *uint = &bld_base->uint_bld;
struct si_shader *gs = &si_shader_ctx->shader->selector->current->shader;
struct si_shader_output_values *outputs;
LLVMValueRef t_list_ptr, t_list;
LLVMValueRef args[9];
int i, r;
outputs = MALLOC(gs->noutput * sizeof(outputs[0]));
si_shader_ctx->type = TGSI_PROCESSOR_VERTEX;
si_shader_ctx->gs_for_vs = gs;
radeon_llvm_context_init(&si_shader_ctx->radeon_bld);
create_meta_data(si_shader_ctx);
create_function(si_shader_ctx);
preload_streamout_buffers(si_shader_ctx);
/* Load the GSVS ring resource descriptor */
t_list_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
t_list = build_indexed_load(si_shader_ctx, t_list_ptr,
lp_build_const_int32(gallivm,
NUM_PIPE_CONST_BUFFERS + 1));
args[0] = t_list;
args[1] = lp_build_mul_imm(uint,
LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
si_shader_ctx->param_vertex_id),
4);
args[3] = uint->zero;
args[4] = uint->one; /* OFFEN */
args[5] = uint->zero; /* IDXEN */
args[6] = uint->one; /* GLC */
args[7] = uint->one; /* SLC */
args[8] = uint->zero; /* TFE */
/* Fetch vertex data from GSVS ring */
for (i = 0; i < gs->noutput; ++i) {
struct si_shader_output *out = gs->output + i;
unsigned chan;
outputs[i].name = out->name;
outputs[i].index = out->index;
outputs[i].usage = out->usage;
for (chan = 0; chan < 4; chan++) {
args[2] = lp_build_const_int32(gallivm,
(i * 4 + chan) *
gs->gs_max_out_vertices * 16 * 4);
outputs[i].values[chan] =
LLVMBuildBitCast(gallivm->builder,
build_intrinsic(gallivm->builder,
"llvm.SI.buffer.load.dword.i32.i32",
LLVMInt32TypeInContext(gallivm->context),
args, 9,
LLVMReadOnlyAttribute | LLVMNoUnwindAttribute),
base->elem_type, "");
}
}
si_llvm_export_vs(bld_base, outputs, gs->noutput);
radeon_llvm_finalize_module(&si_shader_ctx->radeon_bld);
if (dump)
fprintf(stderr, "Copy Vertex Shader for Geometry Shader:\n\n");
r = si_compile_llvm(sctx, si_shader_ctx->shader,
bld_base->base.gallivm->module);
radeon_llvm_dispose(&si_shader_ctx->radeon_bld);
FREE(outputs);
return r;
}
int si_pipe_shader_create(
struct pipe_context *ctx,
struct si_pipe_shader *shader)
@@ -2036,6 +2420,9 @@ int si_pipe_shader_create(
bld_base->op_actions[TGSI_OPCODE_DDY].emit = si_llvm_emit_ddxy;
#endif
bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
si_shader_ctx.radeon_bld.load_system_value = declare_system_value;
si_shader_ctx.tokens = sel->tokens;
tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
@@ -2045,8 +2432,35 @@ int si_pipe_shader_create(
switch (si_shader_ctx.type) {
case TGSI_PROCESSOR_VERTEX:
si_shader_ctx.radeon_bld.load_input = declare_input_vs;
bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
if (shader->key.vs.as_es) {
si_shader_ctx.gs_for_vs = &sctx->gs_shader->current->shader;
bld_base->emit_epilogue = si_llvm_emit_es_epilogue;
} else {
bld_base->emit_epilogue = si_llvm_emit_vs_epilogue;
}
break;
case TGSI_PROCESSOR_GEOMETRY: {
int i;
si_shader_ctx.radeon_bld.load_input = declare_input_gs;
bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
for (i = 0; i < shader_info.num_properties; i++) {
switch (shader_info.properties[i].name) {
case TGSI_PROPERTY_GS_INPUT_PRIM:
shader->shader.gs_input_prim = shader_info.properties[i].data[0];
break;
case TGSI_PROPERTY_GS_OUTPUT_PRIM:
shader->shader.gs_output_prim = shader_info.properties[i].data[0];
break;
case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
shader->shader.gs_max_out_vertices = shader_info.properties[i].data[0];
break;
}
}
break;
}
case TGSI_PROCESSOR_FRAGMENT:
si_shader_ctx.radeon_bld.load_input = declare_input_fs;
bld_base->emit_epilogue = si_llvm_emit_fs_epilogue;
@@ -2071,21 +2485,34 @@ int si_pipe_shader_create(
if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
for (int i = 0; i < NUM_CONST_BUFFERS; i++)
FREE(si_shader_ctx.constants[i]);
FREE(si_shader_ctx.resources);
FREE(si_shader_ctx.samplers);
return -EINVAL;
goto out;
}
radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
mod = bld_base->base.gallivm->module;
r = si_compile_llvm(sctx, shader, mod);
if (r) {
fprintf(stderr, "LLVM failed to compile shader\n");
goto out;
}
radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
if (si_shader_ctx.type == TGSI_PROCESSOR_GEOMETRY) {
shader->gs_copy_shader = CALLOC_STRUCT(si_pipe_shader);
shader->gs_copy_shader->selector = shader->selector;
si_shader_ctx.shader = shader->gs_copy_shader;
if ((r = si_generate_gs_copy_shader(sctx, &si_shader_ctx, dump))) {
free(shader->gs_copy_shader);
shader->gs_copy_shader = NULL;
goto out;
}
}
tgsi_parse_free(&si_shader_ctx.parse);
out:
for (int i = 0; i < NUM_CONST_BUFFERS; i++)
FREE(si_shader_ctx.constants[i]);
FREE(si_shader_ctx.resources);
@@ -40,6 +40,7 @@
#define SI_SGPR_ALPHA_REF 6 /* PS only */
#define SI_VS_NUM_USER_SGPR 11
#define SI_GS_NUM_USER_SGPR 6
#define SI_PS_NUM_USER_SGPR 7
/* LLVM function parameter indices */
@@ -53,6 +54,21 @@
#define SI_PARAM_START_INSTANCE 5
/* the other VS parameters are assigned dynamically */
/* ES only parameters */
#define SI_PARAM_ES2GS_OFFSET 6
/* GS only parameters */
#define SI_PARAM_GS2VS_OFFSET 3
#define SI_PARAM_GS_WAVE_ID 4
#define SI_PARAM_VTX0_OFFSET 5
#define SI_PARAM_VTX1_OFFSET 6
#define SI_PARAM_PRIMITIVE_ID 7
#define SI_PARAM_VTX2_OFFSET 8
#define SI_PARAM_VTX3_OFFSET 9
#define SI_PARAM_VTX4_OFFSET 10
#define SI_PARAM_VTX5_OFFSET 11
#define SI_PARAM_GS_INSTANCE_ID 12
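/* Note (a sketch of the calling convention, inferred from the
 * TGSI_PROCESSOR_GEOMETRY case in create_function()): GS2VS_OFFSET and
 * GS_WAVE_ID are the preloaded SGPRs, GS_WAVE_ID being the last one, while
 * VTX0_OFFSET..GS_INSTANCE_ID are VGPRs carrying the per-vertex ESGS ring
 * offsets, the primitive ID and the GS instance ID. */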
/* PS only parameters */
#define SI_PARAM_ALPHA_REF 3
#define SI_PARAM_PRIM_MASK 4
@@ -73,7 +89,7 @@
#define SI_PARAM_SAMPLE_COVERAGE 19
#define SI_PARAM_POS_FIXED_PT 20
struct si_shader_io {
struct si_shader_input {
unsigned name;
int sid;
unsigned param_offset;
@@ -81,6 +97,14 @@ struct si_shader_io {
bool centroid;
};
struct si_shader_output {
unsigned name;
int sid;
unsigned param_offset;
unsigned index;
unsigned usage;
};
struct si_pipe_shader;
struct si_pipe_shader_selector {
@@ -102,10 +126,15 @@ struct si_pipe_shader_selector {
struct si_shader {
unsigned ninput;
struct si_shader_io input[40];
struct si_shader_input input[40];
unsigned noutput;
struct si_shader_io output[40];
struct si_shader_output output[40];
/* geometry shader properties */
unsigned gs_input_prim;
unsigned gs_output_prim;
unsigned gs_max_out_vertices;
unsigned ninterp;
bool uses_kill;
@@ -131,12 +160,14 @@ union si_shader_key {
struct {
unsigned instance_divisors[PIPE_MAX_ATTRIBS];
unsigned ucps_enabled:2;
unsigned as_es:1;
} vs;
};
struct si_pipe_shader {
struct si_pipe_shader_selector *selector;
struct si_pipe_shader *next_variant;
struct si_pipe_shader *gs_copy_shader;
struct si_shader shader;
struct si_pm4_state *pm4;
struct r600_resource *bo;
@@ -2183,6 +2183,8 @@ static INLINE void si_shader_selector_key(struct pipe_context *ctx,
key->vs.ucps_enabled |= 0x2;
if (sctx->queued.named.rasterizer->clip_plane_enable & 0xf)
key->vs.ucps_enabled |= 0x1;
key->vs.as_es = sctx->gs_shader != NULL;
} else if (sel->type == PIPE_SHADER_FRAGMENT) {
if (sel->fs_write_all)
key->ps.nr_cbufs = sctx->framebuffer.nr_cbufs;
@@ -2247,11 +2249,16 @@ int si_shader_select(struct pipe_context *ctx,
}
}
if (unlikely(!shader)) {
if (shader) {
shader->next_variant = sel->current;
sel->current = shader;
} else {
shader = CALLOC(1, sizeof(struct si_pipe_shader));
shader->selector = sel;
shader->key = key;
shader->next_variant = sel->current;
sel->current = shader;
r = si_pipe_shader_create(ctx, shader);
if (unlikely(r)) {
R600_ERR("Failed to build shader variant (type=%u) %d\n",
@@ -2266,8 +2273,6 @@ int si_shader_select(struct pipe_context *ctx,
if (dirty)
*dirty = 1;
shader->next_variant = sel->current;
sel->current = shader;
return 0;
}
@@ -2305,6 +2310,16 @@ static void *si_create_fs_state(struct pipe_context *ctx,
return si_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}
#if HAVE_LLVM >= 0x0305
static void *si_create_gs_state(struct pipe_context *ctx,
const struct pipe_shader_state *state)
{
return si_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}
#endif
static void *si_create_vs_state(struct pipe_context *ctx,
const struct pipe_shader_state *state)
{
@@ -2328,6 +2343,27 @@ static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
#if HAVE_LLVM >= 0x0305
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pipe_shader_selector *sel = state;
if (sctx->gs_shader == sel)
return;
sctx->gs_shader = sel;
if (sel && sel->current) {
si_pm4_bind_state(sctx, gs, sel->current->pm4);
sctx->b.streamout.stride_in_dw = sel->so.stride;
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
}
#endif
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
@@ -2374,6 +2410,22 @@ static void si_delete_vs_shader(struct pipe_context *ctx, void *state)
si_delete_shader_selector(ctx, sel);
}
#if HAVE_LLVM >= 0x0305
static void si_delete_gs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pipe_shader_selector *sel = (struct si_pipe_shader_selector *)state;
if (sctx->gs_shader == sel) {
sctx->gs_shader = NULL;
}
si_delete_shader_selector(ctx, sel);
}
#endif
static void si_delete_ps_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
@@ -2718,7 +2770,7 @@ static void si_set_sampler_views(struct pipe_context *ctx,
struct si_pipe_sampler_view **rviews = (struct si_pipe_sampler_view **)views;
int i;
if (shader != PIPE_SHADER_VERTEX && shader != PIPE_SHADER_FRAGMENT)
if (shader >= SI_NUM_SHADERS)
return;
assert(start == 0);
@@ -2855,6 +2907,16 @@ static void si_bind_vs_sampler_states(struct pipe_context *ctx, unsigned count,
si_pm4_set_state(sctx, vs_sampler, pm4);
}
static void si_bind_gs_sampler_states(struct pipe_context *ctx, unsigned count, void **states)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
pm4 = si_set_sampler_states(sctx, count, states, &sctx->samplers[PIPE_SHADER_GEOMETRY],
R_00B230_SPI_SHADER_USER_DATA_GS_0);
si_pm4_set_state(sctx, gs_sampler, pm4);
}
static void si_bind_ps_sampler_states(struct pipe_context *ctx, unsigned count, void **states)
{
struct si_context *sctx = (struct si_context *)ctx;
@@ -2876,6 +2938,9 @@ static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
case PIPE_SHADER_VERTEX:
si_bind_vs_sampler_states(ctx, count, states);
break;
case PIPE_SHADER_GEOMETRY:
si_bind_gs_sampler_states(ctx, count, states);
break;
case PIPE_SHADER_FRAGMENT:
si_bind_ps_sampler_states(ctx, count, states);
break;
@@ -3108,6 +3173,11 @@ void si_init_state_functions(struct si_context *sctx)
sctx->b.b.bind_fs_state = si_bind_ps_shader;
sctx->b.b.delete_vs_state = si_delete_vs_shader;
sctx->b.b.delete_fs_state = si_delete_ps_shader;
#if HAVE_LLVM >= 0x0305
sctx->b.b.create_gs_state = si_create_gs_state;
sctx->b.b.bind_gs_state = si_bind_gs_shader;
sctx->b.b.delete_gs_state = si_delete_gs_shader;
#endif
sctx->b.b.create_sampler_state = si_create_sampler_state;
sctx->b.b.bind_sampler_states = si_bind_sampler_states;
@@ -3159,10 +3229,22 @@ void si_init_config(struct si_context *sctx)
si_pm4_set_reg(pm4, R_028A34_VGT_GROUP_VECT_1_CNTL, 0x0);
si_pm4_set_reg(pm4, R_028A38_VGT_GROUP_VECT_0_FMT_CNTL, 0x0);
si_pm4_set_reg(pm4, R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL, 0x0);
si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, 0x0);
/* FIXME calculate these values somehow ??? */
si_pm4_set_reg(pm4, R_028A54_VGT_GS_PER_ES, 0x80);
si_pm4_set_reg(pm4, R_028A58_VGT_ES_PER_GS, 0x40);
si_pm4_set_reg(pm4, R_028A5C_VGT_GS_PER_VS, 0x2);
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0x0);
si_pm4_set_reg(pm4, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0);
si_pm4_set_reg(pm4, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, 0);
si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, 0);
si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, 0);
si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT, 0);
si_pm4_set_reg(pm4, R_028B94_VGT_STRMOUT_CONFIG, 0x0);
si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
if (sctx->b.chip_class == SI) {
@@ -3177,7 +3259,6 @@ void si_init_config(struct si_context *sctx)
si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
S_008A14_CLIP_VTX_REORDER_ENA(1));
si_pm4_set_reg(pm4, R_028B54_VGT_SHADER_STAGES_EN, 0);
si_pm4_set_reg(pm4, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
si_pm4_set_reg(pm4, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
@@ -88,6 +88,11 @@ union si_state {
struct si_pm4_state *fb_rs;
struct si_pm4_state *fb_blend;
struct si_pm4_state *dsa_stencil_ref;
struct si_pm4_state *es;
struct si_pm4_state *gs;
struct si_pm4_state *gs_rings;
struct si_pm4_state *gs_sampler;
struct si_pm4_state *gs_onoff;
struct si_pm4_state *vs;
struct si_pm4_state *vs_sampler;
struct si_pm4_state *ps;
@@ -110,7 +115,7 @@ union si_state {
#define NUM_SAMPLER_STATES NUM_TEX_UNITS
#define NUM_PIPE_CONST_BUFFERS 16
#define NUM_CONST_BUFFERS 17
#define NUM_CONST_BUFFERS 19
/* This represents resource descriptors in memory, such as buffer resources,
* image resources, and sampler states.
@@ -193,6 +198,11 @@ struct si_buffer_resources {
void si_set_sampler_view(struct si_context *sctx, unsigned shader,
unsigned slot, struct pipe_sampler_view *view,
unsigned *view_desc);
void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
struct pipe_constant_buffer *input,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
unsigned element_size, unsigned index_stride);
void si_init_all_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_all_descriptors_begin_new_cs(struct si_context *sctx);
@@ -39,6 +39,120 @@
* Shaders
*/
static void si_pipe_shader_es(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_pm4_state *pm4;
unsigned num_sgprs, num_user_sgprs;
unsigned vgpr_comp_cnt;
uint64_t va;
si_pm4_delete_state(sctx, es, shader->pm4);
pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
if (pm4 == NULL)
return;
va = r600_resource_va(ctx->screen, (void *)shader->bo);
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
num_user_sgprs = SI_VS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
/* One SGPR after user SGPRs is pre-loaded with es2gs_offset */
if ((num_user_sgprs + 1) > num_sgprs) {
/* Last 2 reserved SGPRs are used for VCC */
num_sgprs = num_user_sgprs + 1 + 2;
}
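/* e.g. with SI_VS_NUM_USER_SGPR == 11 this floors num_sgprs at
 * 11 + 1 + 2 = 14 when the compiled shader reports fewer SGPRs. */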
assert(num_sgprs <= 104);
si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
S_00B328_VGPRS((shader->num_vgprs - 1) / 4) |
S_00B328_SGPRS((num_sgprs - 1) / 8) |
S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt));
si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
S_00B32C_USER_SGPR(num_user_sgprs));
si_pm4_bind_state(sctx, es, shader->pm4);
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
static void si_pipe_shader_gs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
unsigned gs_vert_itemsize = shader->shader.noutput * (16 >> 2);
unsigned gs_max_vert_out = shader->shader.gs_max_out_vertices;
unsigned gsvs_itemsize = gs_vert_itemsize * gs_max_vert_out;
unsigned cut_mode;
struct si_pm4_state *pm4;
unsigned num_sgprs, num_user_sgprs;
uint64_t va;
/* The GSVS_RING_ITEMSIZE register takes 15 bits */
assert(gsvs_itemsize < (1 << 15));
si_pm4_delete_state(sctx, gs, shader->pm4);
pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
if (pm4 == NULL)
return;
if (gs_max_vert_out <= 128) {
cut_mode = V_028A40_GS_CUT_128;
} else if (gs_max_vert_out <= 256) {
cut_mode = V_028A40_GS_CUT_256;
} else if (gs_max_vert_out <= 512) {
cut_mode = V_028A40_GS_CUT_512;
} else {
assert(gs_max_vert_out <= 1024);
cut_mode = V_028A40_GS_CUT_1024;
}
si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
S_028A40_CUT_MODE(cut_mode)|
S_028A40_ES_WRITE_OPTIMIZE(1) |
S_028A40_GS_WRITE_OPTIMIZE(1));
si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
shader->shader.ninput * (16 >> 2));
si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize);
si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, gs_max_vert_out);
si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize);
va = r600_resource_va(ctx->screen, (void *)shader->bo);
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
num_user_sgprs = SI_GS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
/* Two SGPRs after user SGPRs are pre-loaded with gs2vs_offset, gs_wave_id */
if ((num_user_sgprs + 2) > num_sgprs) {
/* Last 2 reserved SGPRs are used for VCC */
num_sgprs = num_user_sgprs + 2 + 2;
}
assert(num_sgprs <= 104);
si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
S_00B228_VGPRS((shader->num_vgprs - 1) / 4) |
S_00B228_SGPRS((num_sgprs - 1) / 8));
si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
S_00B22C_USER_SGPR(num_user_sgprs));
si_pm4_bind_state(sctx, gs, shader->pm4);
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
}
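A worked example of the ring item sizes computed above (illustrative numbers, not taken from the patch): a geometry shader with 4 vec4 outputs and at most 6 output vertices gets

gs_vert_itemsize = 4 * (16 >> 2)        = 16 dwords per emitted vertex
gsvs_itemsize    = gs_vert_itemsize * 6 = 96 dwords

well under the 15-bit limit asserted for the GSVS_RING_ITEMSIZE field.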
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
struct si_context *sctx = (struct si_context *)ctx;
@@ -53,6 +167,19 @@ static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *s
if (pm4 == NULL)
return;
va = r600_resource_va(ctx->screen, (void *)shader->bo);
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
num_user_sgprs = SI_VS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
if (num_user_sgprs > num_sgprs) {
/* Last 2 reserved SGPRs are used for VCC */
num_sgprs = num_user_sgprs + 2;
}
assert(num_sgprs <= 104);
/* Certain attributes (position, psize, etc.) don't count as params.
* VS is required to export at least one param and r600_shader_from_tgsi()
* takes care of adding a dummy export.
@@ -85,21 +212,8 @@ static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *s
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
va = r600_resource_va(ctx->screen, (void *)shader->bo);
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
num_user_sgprs = SI_VS_NUM_USER_SGPR;
num_sgprs = shader->num_sgprs;
if (num_user_sgprs > num_sgprs) {
/* Last 2 reserved SGPRs are used for VCC */
num_sgprs = num_user_sgprs + 2;
}
assert(num_sgprs <= 104);
vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
S_00B128_SGPRS((num_sgprs - 1) / 8) |
@@ -291,7 +405,10 @@ static bool si_update_draw_info_state(struct si_context *sctx,
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
struct si_shader *vs = &sctx->vs_shader->current->shader;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim = si_conv_prim_to_gs_out(info->mode);
unsigned gs_out_prim =
si_conv_prim_to_gs_out(sctx->gs_shader ?
sctx->gs_shader->current->shader.gs_output_prim :
info->mode);
unsigned ls_mask = 0;
if (pm4 == NULL)
@@ -331,7 +448,9 @@ static bool si_update_draw_info_state(struct si_context *sctx,
info->indexed ? info->index_bias : info->start);
si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_START_INSTANCE * 4,
si_pm4_set_reg(pm4, SI_SGPR_START_INSTANCE * 4 +
(sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
R_00B130_SPI_SHADER_USER_DATA_VS_0),
info->start_instance);
if (prim == V_008958_DI_PT_LINELIST)
@@ -423,6 +542,45 @@ bcolor:
si_pm4_set_state(sctx, spi, pm4);
}
/* Initialize state related to ESGS / GSVS ring buffers */
static void si_init_gs_rings(struct si_context *sctx)
{
unsigned size = 128 * 1024;
assert(!sctx->gs_rings);
sctx->gs_rings = si_pm4_alloc_state(sctx);
sctx->esgs_ring.buffer =
pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STATIC, size);
sctx->esgs_ring.buffer_size = size;
size = 64 * 1024 * 1024;
sctx->gsvs_ring.buffer =
pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STATIC, size);
sctx->gsvs_ring.buffer_size = size;
if (sctx->b.chip_class >= CIK) {
si_pm4_set_reg(sctx->gs_rings, R_030900_VGT_ESGS_RING_SIZE,
sctx->esgs_ring.buffer_size / 256);
si_pm4_set_reg(sctx->gs_rings, R_030904_VGT_GSVS_RING_SIZE,
sctx->gsvs_ring.buffer_size / 256);
} else {
si_pm4_set_reg(sctx->gs_rings, R_0088C8_VGT_ESGS_RING_SIZE,
sctx->esgs_ring.buffer_size / 256);
si_pm4_set_reg(sctx->gs_rings, R_0088CC_VGT_GSVS_RING_SIZE,
sctx->gsvs_ring.buffer_size / 256);
}
si_set_ring_buffer(&sctx->b.b, SI_SHADER_EXPORT, 0, &sctx->esgs_ring,
0, sctx->esgs_ring.buffer_size, true, true, 4, 64);
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_GEOMETRY, 0, &sctx->esgs_ring,
0, sctx->esgs_ring.buffer_size, false, false, 0, 0);
si_set_ring_buffer(&sctx->b.b, PIPE_SHADER_VERTEX, 0, &sctx->gsvs_ring,
0, sctx->gsvs_ring.buffer_size, false, false, 0, 0);
}
static void si_update_derived_state(struct si_context *sctx)
{
struct pipe_context * ctx = (struct pipe_context*)sctx;
@@ -440,18 +598,79 @@ static void si_update_derived_state(struct si_context *sctx)
}
}
si_shader_select(ctx, sctx->vs_shader, &vs_dirty);
if (sctx->gs_shader) {
unsigned es_dirty = 0, gs_dirty = 0;
if (!sctx->vs_shader->current->pm4) {
si_pipe_shader_vs(ctx, sctx->vs_shader->current);
vs_dirty = 0;
si_shader_select(ctx, sctx->gs_shader, &gs_dirty);
if (!sctx->gs_shader->current->pm4) {
si_pipe_shader_gs(ctx, sctx->gs_shader->current);
si_pipe_shader_vs(ctx,
sctx->gs_shader->current->gs_copy_shader);
gs_dirty = 0;
}
if (gs_dirty) {
si_pm4_bind_state(sctx, gs, sctx->gs_shader->current->pm4);
si_pm4_bind_state(sctx, vs,
sctx->gs_shader->current->gs_copy_shader->pm4);
}
si_shader_select(ctx, sctx->vs_shader, &es_dirty);
if (!sctx->vs_shader->current->pm4) {
si_pipe_shader_es(ctx, sctx->vs_shader->current);
es_dirty = 0;
}
if (es_dirty) {
si_pm4_bind_state(sctx, es, sctx->vs_shader->current->pm4);
}
if (!sctx->gs_rings)
si_init_gs_rings(sctx);
if (sctx->emitted.named.gs_rings != sctx->gs_rings)
sctx->b.flags |= R600_CONTEXT_VGT_FLUSH;
si_pm4_bind_state(sctx, gs_rings, sctx->gs_rings);
si_set_ring_buffer(ctx, PIPE_SHADER_GEOMETRY, 1, &sctx->gsvs_ring,
sctx->gs_shader->current->shader.gs_max_out_vertices *
sctx->gs_shader->current->shader.noutput * 16,
64, true, true, 4, 16);
if (!sctx->gs_on) {
sctx->gs_on = si_pm4_alloc_state(sctx);
si_pm4_set_reg(sctx->gs_on, R_028B54_VGT_SHADER_STAGES_EN,
S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
S_028B54_GS_EN(1) |
S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER));
}
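/* Summary of the pipeline set up above: the vertex shader (compiled with
 * key.vs.as_es) runs as the ES stage and writes to the ESGS ring, the
 * geometry shader runs as the GS stage and writes to the GSVS ring, and its
 * gs_copy_shader (generated by si_generate_gs_copy_shader()) runs as the
 * hardware VS stage, reading the GSVS ring back and exporting the final
 * vertex data. */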
si_pm4_bind_state(sctx, gs_onoff, sctx->gs_on);
} else {
si_shader_select(ctx, sctx->vs_shader, &vs_dirty);
if (!sctx->vs_shader->current->pm4) {
si_pipe_shader_vs(ctx, sctx->vs_shader->current);
vs_dirty = 0;
}
if (vs_dirty) {
si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
}
if (!sctx->gs_off) {
sctx->gs_off = si_pm4_alloc_state(sctx);
si_pm4_set_reg(sctx->gs_off, R_028A40_VGT_GS_MODE, 0);
si_pm4_set_reg(sctx->gs_off, R_028B54_VGT_SHADER_STAGES_EN, 0);
}
si_pm4_bind_state(sctx, gs_onoff, sctx->gs_off);
si_pm4_bind_state(sctx, gs_rings, NULL);
si_pm4_bind_state(sctx, gs, NULL);
si_pm4_bind_state(sctx, es, NULL);
}
if (vs_dirty) {
si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
}
si_shader_select(ctx, sctx->ps_shader, &ps_dirty);
if (!sctx->ps_shader->current->pm4) {
@@ -531,7 +750,10 @@ static void si_vertex_buffer_update(struct si_context *sctx)
bound[ve->vertex_buffer_index] = true;
}
}
si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
si_pm4_sh_data_end(pm4, sctx->gs_shader ?
R_00B330_SPI_SHADER_USER_DATA_ES_0 :
R_00B130_SPI_SHADER_USER_DATA_VS_0,
SI_SGPR_VERTEX_BUFFER);
si_pm4_set_state(sctx, vertex_buffers, pm4);
}
@@ -691,6 +913,11 @@ void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *ato
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
sctx->flags = 0;
}