r600/sfn: drop unused code

Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/37846>
This commit is contained in:
Gert Wollny
2025-09-16 14:31:46 +02:00
committed by Marge Bot
parent 0f7dd6636c
commit a2e4280dbe

View File

@@ -1168,197 +1168,4 @@ r600_merge_vec2_stores(nir_shader *shader)
return merger.combine();
}
/* Split a 64-bit load/store intrinsic with more than two components into
 * two intrinsics of at most two 64-bit components each, so the rest of the
 * backend only has to deal with 64-bit values that fit into a vec2.
 *
 * Returns true if the intrinsic was split (the original instruction is
 * removed), false if it was left untouched.
 */
static bool
r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
{
   b->cursor = nir_after_instr(&instr->instr);

   /* Only handle the memory/IO intrinsics listed here; everything else is
    * left alone. */
   switch (instr->intrinsic) {
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ubo_vec4:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
   case nir_intrinsic_store_ssbo:
      break;
   default:
      return false;
   }

   /* vec1/vec2 already fits; nothing to split. */
   if (instr->num_components <= 2)
      return false;

   /* Decide from the intrinsic metadata whether this is a load (has a def)
    * or a store (value in src[0]), and bail unless it is 64-bit. */
   bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
   if (has_dest) {
      if (instr->def.bit_size != 64)
         return false;
   } else {
      if (nir_src_bit_size(instr->src[0]) != 64)
         return false;
   }

   /* Clone the instruction twice: "first" will cover components 0-1,
    * "second" the remaining one or two components. */
   nir_intrinsic_instr *first =
      nir_instr_as_intrinsic(nir_instr_clone(b->shader, &instr->instr));
   nir_intrinsic_instr *second =
      nir_instr_as_intrinsic(nir_instr_clone(b->shader, &instr->instr));

   switch (instr->intrinsic) {
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ubo_vec4:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_store_ssbo:
      /* Buffer accesses are addressed by offset src; adjusted below. */
      break;
   default: {
      /* Per-slot IO (inputs/outputs): the second half lives in the next
       * location/base slot, since a 64-bit vec3/vec4 occupies two slots. */
      nir_io_semantics semantics = nir_intrinsic_io_semantics(second);
      semantics.location++;
      semantics.num_slots--;
      nir_intrinsic_set_io_semantics(second, semantics);
      nir_intrinsic_set_base(second, nir_intrinsic_base(second) + 1);
      break;
   }
   }

   /* first: components 0-1; second: the remaining 1 or 2 components. */
   first->num_components = 2;
   second->num_components -= 2;
   if (has_dest) {
      first->def.num_components = 2;
      second->def.num_components -= 2;
   }

   nir_builder_instr_insert(b, &first->instr);
   nir_builder_instr_insert(b, &second->instr);

   if (has_dest) {
      /* Merge the two loads' results back into a vector. */
      nir_scalar channels[4] = {
         nir_get_scalar(&first->def, 0),
         nir_get_scalar(&first->def, 1),
         nir_get_scalar(&second->def, 0),
         /* For a vec3 result the 4th channel is a dummy (it is never
          * selected because instr->num_components is 3). */
         nir_get_scalar(&second->def, second->num_components > 1 ? 1 : 0),
      };
      nir_def *new_ir = nir_vec_scalars(b, channels, instr->num_components);
      nir_def_rewrite_uses(&instr->def, new_ir);
   } else {
      /* Split the src value across the two stores. */
      b->cursor = nir_before_instr(&instr->instr);

      nir_def *src0 = instr->src[0].ssa;
      nir_scalar channels[4] = {{0}};
      for (int i = 0; i < instr->num_components; i++)
         channels[i] = nir_get_scalar(src0, i);

      /* Split the write mask: low two bits go to the first store, the
       * rest (shifted down) to the second. */
      nir_intrinsic_set_write_mask(first, nir_intrinsic_write_mask(instr) & 3);
      nir_intrinsic_set_write_mask(second, nir_intrinsic_write_mask(instr) >> 2);
      nir_src_rewrite(&first->src[0], nir_vec_scalars(b, channels, 2));
      nir_src_rewrite(&second->src[0],
                      nir_vec_scalars(b, &channels[2], second->num_components));
   }

   /* For buffer-style intrinsics, bump the second half's offset source.
    * 16 bytes for byte-addressed ubo/ssbo; 1 for ubo_vec4/uniform, whose
    * offsets are presumably in vec4 slots — TODO confirm against NIR
    * intrinsic definitions. */
   int offset_src = -1;
   uint32_t offset_amount = 16;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_ubo:
      offset_src = 1;
      break;
   case nir_intrinsic_load_ubo_vec4:
   case nir_intrinsic_load_uniform:
      offset_src = 0;
      offset_amount = 1;
      break;
   case nir_intrinsic_store_ssbo:
      offset_src = 2;
      break;
   default:
      break;
   }
   if (offset_src != -1) {
      b->cursor = nir_before_instr(&second->instr);
      nir_def *second_offset =
         nir_iadd_imm(b, second->src[offset_src].ssa, offset_amount);
      nir_src_rewrite(&second->src[offset_src], second_offset);
   }

   /* DCE stores we generated with no writemask (nothing else does this
    * currently).
    */
   if (!has_dest) {
      if (nir_intrinsic_write_mask(first) == 0)
         nir_instr_remove(&first->instr);
      if (nir_intrinsic_write_mask(second) == 0)
         nir_instr_remove(&second->instr);
   }

   nir_instr_remove(&instr->instr);
   return true;
}
/* Split a 64-bit load_const with more than two components into two
 * load_consts of at most two components each and re-assemble the result
 * with a vec, so wide 64-bit constants never reach the backend.
 *
 * Returns true if the constant was split, false if it needed no lowering.
 */
static bool
r600_lower_64bit_load_const(nir_builder *b, nir_load_const_instr *instr)
{
   const int ncomp = instr->def.num_components;

   if (ncomp <= 2 || instr->def.bit_size != 64)
      return false;

   b->cursor = nir_before_instr(&instr->instr);

   /* lo holds components 0-1, hi the remaining one or two. */
   nir_load_const_instr *lo = nir_load_const_instr_create(b->shader, 2, 64);
   nir_load_const_instr *hi =
      nir_load_const_instr_create(b->shader, ncomp - 2, 64);

   lo->value[0] = instr->value[0];
   lo->value[1] = instr->value[1];
   hi->value[0] = instr->value[2];
   if (ncomp == 4)
      hi->value[1] = instr->value[3];

   nir_builder_instr_insert(b, &lo->instr);
   nir_builder_instr_insert(b, &hi->instr);

   /* Fourth slot is only populated for vec4; nir_vec reads ncomp entries. */
   nir_def *comps[4];
   comps[0] = nir_channel(b, &lo->def, 0);
   comps[1] = nir_channel(b, &lo->def, 1);
   comps[2] = nir_channel(b, &hi->def, 0);
   comps[3] = ncomp == 4 ? nir_channel(b, &hi->def, 1) : NULL;

   nir_def_replace(&instr->def, nir_vec(b, comps, ncomp));
   return true;
}
/* Per-instruction callback: dispatch to the load_const or intrinsic
 * lowering helper; all other instruction types are left untouched. */
static bool
r600_lower_64bit_to_vec2_instr(nir_builder *b, nir_instr *instr, UNUSED void *data)
{
   if (instr->type == nir_instr_type_load_const)
      return r600_lower_64bit_load_const(b, nir_instr_as_load_const(instr));

   if (instr->type == nir_instr_type_intrinsic)
      return r600_lower_64bit_intrinsic(b, nir_instr_as_intrinsic(instr));

   return false;
}
/* Shader pass entry point: lower all 64-bit vec3/vec4 constants and
 * load/store intrinsics to at most vec2 pieces.  Returns whether any
 * instruction was changed. */
bool
r600_lower_64bit_to_vec2(nir_shader *s)
{
   return nir_shader_instructions_pass(s, r600_lower_64bit_to_vec2_instr,
                                       nir_metadata_control_flow, NULL);
}
} // end namespace r600