pan/mdg: Use unified atomics

This doesn't actually save anything, since Italo already introduced magic macros
for this, but it ticks off one more driver on the list to convert. It's also
more legible, so that's nice :-)

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22914>
This commit is contained in:
Alyssa Rosenzweig
2023-05-08 18:14:14 -04:00
committed by Marge Bot
parent 133af0d6c9
commit 3c823351c0
+45 -43
View File
@@ -463,6 +463,7 @@ optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_lower_legacy_atomics);
/* Backend scheduler is purely local, so do some global optimizations
* to reduce register pressure. */
@@ -552,22 +553,6 @@ nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
return false;
}
/* Expand one atomic intrinsic into a switch case that forwards to
 * emit_atomic() with the matching midgard_op_* opcode. The trailing ~0
 * argument marks "not an image atomic" (no lea_image destination). */
#define ATOMIC_CASE_IMPL(ctx, instr, nir, op, is_shared) \
case nir_intrinsic_##nir: \
emit_atomic(ctx, instr, is_shared, midgard_op_##op, ~0); \
break;
/* Emit both the shared-memory and the global-memory variant of an atomic;
 * SSBO atomics have already been lowered to global by a NIR pass. */
#define ATOMIC_CASE(ctx, instr, nir, op) \
ATOMIC_CASE_IMPL(ctx, instr, shared_atomic_##nir, atomic_##op, true); \
ATOMIC_CASE_IMPL(ctx, instr, global_atomic_##nir, atomic_##op, false);
/* Image atomics: first emit a lea_image op to compute a direct address,
 * then emit a global atomic against that address (ins.dest). */
#define IMAGE_ATOMIC_CASE(ctx, instr, nir, op) \
case nir_intrinsic_image_atomic_##nir: { \
midgard_instruction ins = emit_image_op(ctx, instr, true); \
emit_atomic(ctx, instr, false, midgard_op_atomic_##op, ins.dest); \
break; \
}
#define ALU_CASE(nir, _op) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
@@ -1329,20 +1314,45 @@ emit_global(compiler_context *ctx, nir_instr *instr, bool is_read,
emit_mir_instruction(ctx, ins);
}
/* If is_shared is off, the only other possible value are globals, since
* SSBO's are being lowered to globals through a NIR pass.
* `image_direct_address` should be ~0 when instr is not an image_atomic
* and the destination register of a lea_image op when it is an image_atomic. */
static void
emit_atomic(compiler_context *ctx, nir_intrinsic_instr *instr, bool is_shared,
midgard_load_store_op op, unsigned image_direct_address)
/*
 * Map a unified NIR atomic opcode (nir_intrinsic_atomic_op) onto the
 * corresponding Midgard load/store atomic opcode. Opcodes the backend
 * does not implement hit unreachable().
 */
static midgard_load_store_op
translate_atomic_op(nir_atomic_op nir_op)
{
   switch (nir_op) {
   case nir_atomic_op_iadd:
      return midgard_op_atomic_add;
   case nir_atomic_op_iand:
      return midgard_op_atomic_and;
   case nir_atomic_op_ior:
      return midgard_op_atomic_or;
   case nir_atomic_op_ixor:
      return midgard_op_atomic_xor;
   case nir_atomic_op_imin:
      return midgard_op_atomic_imin;
   case nir_atomic_op_imax:
      return midgard_op_atomic_imax;
   case nir_atomic_op_umin:
      return midgard_op_atomic_umin;
   case nir_atomic_op_umax:
      return midgard_op_atomic_umax;
   case nir_atomic_op_xchg:
      return midgard_op_atomic_xchg;
   case nir_atomic_op_cmpxchg:
      return midgard_op_atomic_cmpxchg;
   default:
      unreachable("Unexpected atomic");
   }
}
/* Emit an atomic to shared memory or global memory. Image atomics are
* implemented as global memory atomics on direct addresses calculated by
* lea_image and passed as image_direct_address, otherwise that should be ~0.
*/
static void
emit_atomic(compiler_context *ctx, nir_intrinsic_instr *instr,
unsigned image_direct_address)
{
midgard_load_store_op op =
translate_atomic_op(nir_intrinsic_atomic_op(instr));
nir_alu_type type =
(op == midgard_op_atomic_imin || op == midgard_op_atomic_imax)
? nir_type_int
: nir_type_uint;
bool is_image = image_direct_address != ~0;
bool is_shared = (instr->intrinsic == nir_intrinsic_shared_atomic) ||
(instr->intrinsic == nir_intrinsic_shared_atomic_swap);
unsigned dest = nir_dest_index(&instr->dest);
unsigned val_src = is_image ? 3 : 1;
@@ -2072,27 +2082,19 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
}
break;
ATOMIC_CASE(ctx, instr, add, add);
ATOMIC_CASE(ctx, instr, and, and);
ATOMIC_CASE(ctx, instr, comp_swap, cmpxchg);
ATOMIC_CASE(ctx, instr, exchange, xchg);
ATOMIC_CASE(ctx, instr, imax, imax);
ATOMIC_CASE(ctx, instr, imin, imin);
ATOMIC_CASE(ctx, instr, or, or);
ATOMIC_CASE(ctx, instr, umax, umax);
ATOMIC_CASE(ctx, instr, umin, umin);
ATOMIC_CASE(ctx, instr, xor, xor);
case nir_intrinsic_shared_atomic:
case nir_intrinsic_shared_atomic_swap:
case nir_intrinsic_global_atomic:
case nir_intrinsic_global_atomic_swap:
emit_atomic(ctx, instr, ~0);
break;
IMAGE_ATOMIC_CASE(ctx, instr, add, add);
IMAGE_ATOMIC_CASE(ctx, instr, and, and);
IMAGE_ATOMIC_CASE(ctx, instr, comp_swap, cmpxchg);
IMAGE_ATOMIC_CASE(ctx, instr, exchange, xchg);
IMAGE_ATOMIC_CASE(ctx, instr, imax, imax);
IMAGE_ATOMIC_CASE(ctx, instr, imin, imin);
IMAGE_ATOMIC_CASE(ctx, instr, or, or);
IMAGE_ATOMIC_CASE(ctx, instr, umax, umax);
IMAGE_ATOMIC_CASE(ctx, instr, umin, umin);
IMAGE_ATOMIC_CASE(ctx, instr, xor, xor);
case nir_intrinsic_image_atomic:
case nir_intrinsic_image_atomic_swap: {
midgard_instruction lea_image = emit_image_op(ctx, instr, true);
emit_atomic(ctx, instr, lea_image.dest);
break;
}
default:
fprintf(stderr, "Unhandled intrinsic %s\n",