pan: Move pan_shader NIR helpers to pan_compiler.h
Acked-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38753>
This commit is contained in:
committed by
Marge Bot
parent
55ae25f2d7
commit
b7f45c66fe
@@ -9,6 +9,7 @@
|
||||
#include "compiler/nir/nir.h"
|
||||
#include "compiler/nir/nir_builder.h"
|
||||
#include "pan_shader.h"
|
||||
#include "panfrost/compiler/pan_compiler.h"
|
||||
#include "panfrost/compiler/pan_nir_lower_framebuffer.h"
|
||||
#include "pan_context.h"
|
||||
|
||||
@@ -112,8 +113,8 @@ GENX(pan_blend_get_shader_locked)(struct pan_blend_shader_cache *cache,
|
||||
#endif
|
||||
|
||||
struct pan_shader_info info;
|
||||
pan_shader_preprocess(nir, inputs.gpu_id);
|
||||
pan_shader_postprocess(nir, inputs.gpu_id);
|
||||
pan_preprocess_nir(nir, inputs.gpu_id);
|
||||
pan_postprocess_nir(nir, inputs.gpu_id);
|
||||
|
||||
#if PAN_ARCH >= 6
|
||||
NIR_PASS(_, nir, GENX(pan_inline_rt_conversion), rt_formats);
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
#include "pan_pool.h"
|
||||
#include "pan_shader.h"
|
||||
#include "pan_texture.h"
|
||||
#include "compiler/pan_compiler.h"
|
||||
|
||||
#if PAN_ARCH >= 6
|
||||
/* On Midgard, the native preload infrastructure (via MFBD preloads) is broken
|
||||
@@ -459,7 +460,7 @@ pan_preload_get_shader(struct pan_fb_preload_cache *cache,
|
||||
}
|
||||
|
||||
nir_builder b = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_FRAGMENT, pan_shader_get_compiler_options(PAN_ARCH),
|
||||
MESA_SHADER_FRAGMENT, pan_get_nir_shader_compiler_options(PAN_ARCH),
|
||||
"pan_preload(%s)", sig);
|
||||
|
||||
nir_def *barycentric = nir_load_barycentric(
|
||||
@@ -558,9 +559,9 @@ pan_preload_get_shader(struct pan_fb_preload_cache *cache,
|
||||
for (unsigned i = 0; i < active_count; ++i)
|
||||
BITSET_SET(b.shader->info.textures_used, i);
|
||||
|
||||
pan_shader_preprocess(b.shader, inputs.gpu_id);
|
||||
pan_shader_lower_texture_early(b.shader, inputs.gpu_id);
|
||||
pan_shader_postprocess(b.shader, inputs.gpu_id);
|
||||
pan_preprocess_nir(b.shader, inputs.gpu_id);
|
||||
pan_nir_lower_texture_early(b.shader, inputs.gpu_id);
|
||||
pan_postprocess_nir(b.shader, inputs.gpu_id);
|
||||
|
||||
if (PAN_ARCH == 4) {
|
||||
NIR_PASS(_, b.shader, nir_shader_intrinsics_pass,
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
#include "pan_context.h"
|
||||
#include "pan_resource.h"
|
||||
#include "pan_screen.h"
|
||||
#include "pan_shader.h"
|
||||
#include "pan_compiler.h"
|
||||
|
||||
#define panfrost_afbc_add_info_ubo(name, b) \
|
||||
nir_variable *info_ubo = nir_variable_create( \
|
||||
@@ -202,7 +202,7 @@ panfrost_create_afbc_size_shader(struct panfrost_screen *screen,
|
||||
struct panfrost_device *dev = pan_device(&screen->base);
|
||||
|
||||
nir_builder b = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_COMPUTE, pan_shader_get_compiler_options(dev->arch),
|
||||
MESA_SHADER_COMPUTE, pan_get_nir_shader_compiler_options(dev->arch),
|
||||
"panfrost_afbc_size(uncompressed_size=%u, align=%u)",
|
||||
key->afbc.uncompressed_size, align);
|
||||
|
||||
@@ -239,7 +239,7 @@ panfrost_create_afbc_pack_shader(struct panfrost_screen *screen,
|
||||
unsigned align = key->afbc.align;
|
||||
struct panfrost_device *dev = pan_device(&screen->base);
|
||||
nir_builder b = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_COMPUTE, pan_shader_get_compiler_options(dev->arch),
|
||||
MESA_SHADER_COMPUTE, pan_get_nir_shader_compiler_options(dev->arch),
|
||||
"panfrost_afbc_pack");
|
||||
|
||||
panfrost_afbc_add_info_ubo(pack, b);
|
||||
@@ -284,7 +284,7 @@ panfrost_create_mtk_tiled_detile_shader(
|
||||
const struct panfrost_device *device = &screen->dev;
|
||||
bool tint_yuv = (device->debug & PAN_DBG_YUV) != 0;
|
||||
nir_builder b = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_COMPUTE, pan_shader_get_compiler_options(device->arch),
|
||||
MESA_SHADER_COMPUTE, pan_get_nir_shader_compiler_options(device->arch),
|
||||
"panfrost_mtk_detile");
|
||||
b.shader->info.workgroup_size[0] = 4;
|
||||
b.shader->info.workgroup_size[1] = 16;
|
||||
|
||||
@@ -53,7 +53,7 @@
|
||||
#include "pan_public.h"
|
||||
#include "pan_resource.h"
|
||||
#include "pan_screen.h"
|
||||
#include "pan_shader.h"
|
||||
#include "pan_compiler.h"
|
||||
#include "pan_util.h"
|
||||
|
||||
#include "pan_context.h"
|
||||
@@ -1147,7 +1147,8 @@ panfrost_create_screen(int fd, const struct pipe_screen_config *config,
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i <= MESA_SHADER_COMPUTE; i++)
|
||||
screen->base.nir_options[i] = pan_shader_get_compiler_options(pan_screen(&screen->base)->dev.arch);
|
||||
screen->base.nir_options[i] =
|
||||
pan_get_nir_shader_compiler_options(dev->arch);
|
||||
|
||||
switch (dev->arch) {
|
||||
case 4:
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
#include "nir_serialize.h"
|
||||
#include "pan_bo.h"
|
||||
#include "pan_context.h"
|
||||
#include "pan_compiler.h"
|
||||
#include "shader_enums.h"
|
||||
|
||||
static struct panfrost_uncompiled_shader *
|
||||
@@ -137,9 +138,9 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
|
||||
* happens at CSO create time regardless.
|
||||
*/
|
||||
if (mesa_shader_stage_is_compute(s->info.stage)) {
|
||||
pan_shader_preprocess(s, panfrost_device_gpu_id(dev));
|
||||
pan_shader_lower_texture_early(s, panfrost_device_gpu_id(dev));
|
||||
pan_shader_postprocess(s, panfrost_device_gpu_id(dev));
|
||||
pan_preprocess_nir(s, panfrost_device_gpu_id(dev));
|
||||
pan_nir_lower_texture_early(s, panfrost_device_gpu_id(dev));
|
||||
pan_postprocess_nir(s, panfrost_device_gpu_id(dev));
|
||||
}
|
||||
|
||||
struct pan_compile_inputs inputs = {
|
||||
@@ -225,7 +226,7 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
|
||||
|
||||
/* Lower resource indices */
|
||||
NIR_PASS(_, s, panfrost_nir_lower_res_indices, &inputs);
|
||||
pan_shader_lower_texture_late(s, inputs.gpu_id);
|
||||
pan_nir_lower_texture_late(s, inputs.gpu_id);
|
||||
|
||||
if (dev->arch >= 9) {
|
||||
inputs.valhall.use_ld_var_buf = panfrost_use_ld_var_buf(s);
|
||||
@@ -549,13 +550,13 @@ panfrost_create_shader_state(struct pipe_context *pctx,
|
||||
|
||||
/* Then run the suite of lowering and optimization, including I/O lowering */
|
||||
struct panfrost_device *dev = pan_device(pctx->screen);
|
||||
pan_shader_preprocess(nir, panfrost_device_gpu_id(dev));
|
||||
pan_shader_lower_texture_early(nir, panfrost_device_gpu_id(dev));
|
||||
pan_preprocess_nir(nir, panfrost_device_gpu_id(dev));
|
||||
pan_nir_lower_texture_early(nir, panfrost_device_gpu_id(dev));
|
||||
|
||||
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
|
||||
glsl_type_size, nir_lower_io_use_interpolated_input_intrinsics);
|
||||
|
||||
pan_shader_postprocess(nir, panfrost_device_gpu_id(dev));
|
||||
pan_postprocess_nir(nir, panfrost_device_gpu_id(dev));
|
||||
|
||||
if (nir->info.stage == MESA_SHADER_FRAGMENT)
|
||||
so->noperspective_varyings =
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include "compiler/glsl_types.h"
|
||||
#include "compiler/spirv/nir_spirv.h"
|
||||
#include "panfrost/compiler/bifrost/bifrost_compile.h"
|
||||
#include "panfrost/compiler/pan_compiler.h"
|
||||
#include "nir.h"
|
||||
#include "nir_builder.h"
|
||||
#include "nir_builder_opcodes.h"
|
||||
@@ -419,10 +420,10 @@ main(int argc, const char **argv)
|
||||
NIR_PASS(progress, s, nir_opt_loop);
|
||||
} while (progress);
|
||||
|
||||
pan_shader_preprocess(s, inputs.gpu_id);
|
||||
pan_shader_lower_texture_early(s, inputs.gpu_id);
|
||||
pan_shader_postprocess(s, inputs.gpu_id);
|
||||
pan_shader_lower_texture_late(s, inputs.gpu_id);
|
||||
pan_preprocess_nir(s, inputs.gpu_id);
|
||||
pan_nir_lower_texture_early(s, inputs.gpu_id);
|
||||
pan_postprocess_nir(s, inputs.gpu_id);
|
||||
pan_nir_lower_texture_late(s, inputs.gpu_id);
|
||||
|
||||
NIR_PASS(_, s, nir_opt_deref);
|
||||
NIR_PASS(_, s, nir_lower_vars_to_ssa);
|
||||
|
||||
@@ -24,12 +24,89 @@
|
||||
|
||||
#include "pan_compiler.h"
|
||||
|
||||
#include "bifrost/bifrost_compile.h"
|
||||
#include "bifrost/bifrost/disassemble.h"
|
||||
#include "bifrost/valhall/disassemble.h"
|
||||
#include "midgard/disassemble.h"
|
||||
#include "midgard/midgard_compile.h"
|
||||
|
||||
#include "panfrost/model/pan_model.h"
|
||||
|
||||
const nir_shader_compiler_options *
|
||||
pan_get_nir_shader_compiler_options(unsigned arch)
|
||||
{
|
||||
switch (arch) {
|
||||
case 4:
|
||||
case 5:
|
||||
return &midgard_nir_options;
|
||||
case 6:
|
||||
case 7:
|
||||
return &bifrost_nir_options_v6;
|
||||
case 9:
|
||||
case 10:
|
||||
return &bifrost_nir_options_v9;
|
||||
case 11:
|
||||
case 12:
|
||||
case 13:
|
||||
return &bifrost_nir_options_v11;
|
||||
default:
|
||||
assert(!"Unsupported arch");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
pan_preprocess_nir(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
if (pan_arch(gpu_id) >= 6)
|
||||
bifrost_preprocess_nir(nir, gpu_id);
|
||||
else
|
||||
midgard_preprocess_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
void
|
||||
pan_optimize_nir(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
assert(pan_arch(gpu_id) >= 6);
|
||||
bifrost_optimize_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
void
|
||||
pan_postprocess_nir(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
if (pan_arch(gpu_id) >= 6)
|
||||
bifrost_postprocess_nir(nir, gpu_id);
|
||||
else
|
||||
midgard_postprocess_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
void
|
||||
pan_nir_lower_texture_early(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
nir_lower_tex_options lower_tex_options = {
|
||||
.lower_txs_lod = true,
|
||||
.lower_txp = ~0,
|
||||
.lower_tg4_offsets = true,
|
||||
.lower_tg4_broadcom_swizzle = true,
|
||||
.lower_txd = pan_arch(gpu_id) < 6,
|
||||
.lower_txd_cube_map = true,
|
||||
.lower_invalid_implicit_lod = true,
|
||||
.lower_index_to_offset = pan_arch(gpu_id) >= 6,
|
||||
};
|
||||
|
||||
NIR_PASS(_, nir, nir_lower_tex, &lower_tex_options);
|
||||
}
|
||||
|
||||
void
|
||||
pan_nir_lower_texture_late(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
/* This must be called after any lowering of resource indices
|
||||
* (panfrost_nir_lower_res_indices / panvk_per_arch(nir_lower_descriptors))
|
||||
*/
|
||||
if (pan_arch(gpu_id) >= 6)
|
||||
bifrost_lower_texture_late_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
void
|
||||
pan_disassemble(FILE *fp, const void *code, size_t size,
|
||||
unsigned gpu_id, bool verbose)
|
||||
|
||||
@@ -28,6 +28,19 @@
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
|
||||
typedef struct nir_shader nir_shader;
|
||||
struct nir_shader_compiler_options;
|
||||
|
||||
const struct nir_shader_compiler_options *
|
||||
pan_get_nir_shader_compiler_options(unsigned arch);
|
||||
|
||||
void pan_preprocess_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void pan_optimize_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void pan_postprocess_nir(nir_shader *nir, unsigned gpu_id);
|
||||
|
||||
void pan_nir_lower_texture_early(nir_shader *nir, unsigned gpu_id);
|
||||
void pan_nir_lower_texture_late(nir_shader *nir, unsigned gpu_id);
|
||||
|
||||
void pan_disassemble(FILE *fp, const void *code, size_t size,
|
||||
unsigned gpu_id, bool verbose);
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@
|
||||
#include "util/blend.h"
|
||||
|
||||
#ifdef PAN_ARCH
|
||||
#include "pan_shader.h"
|
||||
#include "pan_texture.h"
|
||||
#endif
|
||||
|
||||
@@ -34,6 +33,8 @@
|
||||
#include "compiler/nir/nir_builder.h"
|
||||
#include "compiler/nir/nir_conversion_builder.h"
|
||||
#include "compiler/nir/nir_lower_blend.h"
|
||||
#include "compiler/pan_compiler.h"
|
||||
#include "compiler/pan_nir_lower_framebuffer.h"
|
||||
#include "util/format/u_format.h"
|
||||
|
||||
#ifndef PAN_ARCH
|
||||
@@ -610,7 +611,7 @@ GENX(pan_blend_create_shader)(const struct pan_blend_state *state,
|
||||
get_equation_str(rt_state, equation_str, sizeof(equation_str));
|
||||
|
||||
nir_builder b = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_FRAGMENT, pan_shader_get_compiler_options(PAN_ARCH),
|
||||
MESA_SHADER_FRAGMENT, pan_get_nir_shader_compiler_options(PAN_ARCH),
|
||||
"pan_blend(rt=%d,fmt=%s,nr_samples=%d,%s=%s)", rt,
|
||||
util_format_name(rt_state->format), rt_state->nr_samples,
|
||||
state->logicop_enable ? "logicop" : "equation",
|
||||
|
||||
@@ -30,29 +30,6 @@
|
||||
#include "panfrost/compiler/bifrost/bifrost_compile.h"
|
||||
#include "panfrost/compiler/midgard/midgard_compile.h"
|
||||
|
||||
const nir_shader_compiler_options *
|
||||
pan_shader_get_compiler_options(unsigned arch)
|
||||
{
|
||||
switch (arch) {
|
||||
case 4:
|
||||
case 5:
|
||||
return &midgard_nir_options;
|
||||
case 6:
|
||||
case 7:
|
||||
return &bifrost_nir_options_v6;
|
||||
case 9:
|
||||
case 10:
|
||||
return &bifrost_nir_options_v9;
|
||||
case 11:
|
||||
case 12:
|
||||
case 13:
|
||||
return &bifrost_nir_options_v11;
|
||||
default:
|
||||
assert(!"Unsupported arch");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
pan_shader_compile(nir_shader *s, struct pan_compile_inputs *inputs,
|
||||
struct util_dynarray *binary, struct pan_shader_info *info)
|
||||
|
||||
@@ -31,15 +31,6 @@
|
||||
#include "panfrost/compiler/pan_ir.h"
|
||||
#include "panfrost/compiler/pan_nir_lower_framebuffer.h"
|
||||
|
||||
void bifrost_preprocess_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void bifrost_optimize_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void bifrost_postprocess_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void bifrost_lower_texture_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void bifrost_lower_texture_late_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void midgard_preprocess_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void midgard_postprocess_nir(nir_shader *nir, unsigned gpu_id);
|
||||
void midgard_lower_texture_nir(nir_shader *nir, unsigned gpu_id);
|
||||
|
||||
static unsigned
|
||||
pan_get_fixed_varying_mask(unsigned varyings_used)
|
||||
{
|
||||
@@ -47,63 +38,10 @@ pan_get_fixed_varying_mask(unsigned varyings_used)
|
||||
~VARYING_BIT_POS & ~VARYING_BIT_PSIZ;
|
||||
}
|
||||
|
||||
static inline void
|
||||
pan_shader_preprocess(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
if (pan_arch(gpu_id) >= 6)
|
||||
bifrost_preprocess_nir(nir, gpu_id);
|
||||
else
|
||||
midgard_preprocess_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pan_shader_optimize(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
assert(pan_arch(gpu_id) >= 6);
|
||||
bifrost_optimize_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pan_shader_postprocess(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
if (pan_arch(gpu_id) >= 6)
|
||||
bifrost_postprocess_nir(nir, gpu_id);
|
||||
else
|
||||
midgard_postprocess_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pan_shader_lower_texture_early(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
nir_lower_tex_options lower_tex_options = {
|
||||
.lower_txs_lod = true,
|
||||
.lower_txp = ~0,
|
||||
.lower_tg4_offsets = true,
|
||||
.lower_tg4_broadcom_swizzle = true,
|
||||
.lower_txd = pan_arch(gpu_id) < 6,
|
||||
.lower_txd_cube_map = true,
|
||||
.lower_invalid_implicit_lod = true,
|
||||
.lower_index_to_offset = pan_arch(gpu_id) >= 6,
|
||||
};
|
||||
|
||||
NIR_PASS(_, nir, nir_lower_tex, &lower_tex_options);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pan_shader_lower_texture_late(nir_shader *nir, unsigned gpu_id)
|
||||
{
|
||||
/* This must be called after any lowering of resource indices
|
||||
* (panfrost_nir_lower_res_indices / panvk_per_arch(nir_lower_descriptors)) */
|
||||
if (pan_arch(gpu_id) >= 6)
|
||||
bifrost_lower_texture_late_nir(nir, gpu_id);
|
||||
}
|
||||
|
||||
void pan_shader_compile(nir_shader *nir, struct pan_compile_inputs *inputs,
|
||||
struct util_dynarray *binary,
|
||||
struct pan_shader_info *info);
|
||||
|
||||
const nir_shader_compiler_options *pan_shader_get_compiler_options(unsigned arch);
|
||||
|
||||
#ifdef PAN_ARCH
|
||||
|
||||
#if PAN_ARCH >= 9
|
||||
|
||||
@@ -287,7 +287,7 @@ panvk_meta_desc_copy_rsd(struct panvk_device *dev)
|
||||
|
||||
nir_builder b = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_COMPUTE,
|
||||
pan_shader_get_compiler_options(
|
||||
pan_get_nir_shader_compiler_options(
|
||||
pan_arch(phys_dev->kmod.props.gpu_id)),
|
||||
"%s", "desc_copy");
|
||||
|
||||
@@ -306,8 +306,8 @@ panvk_meta_desc_copy_rsd(struct panvk_device *dev)
|
||||
.gpu_variant = phys_dev->kmod.props.gpu_variant,
|
||||
};
|
||||
|
||||
pan_shader_preprocess(b.shader, inputs.gpu_id);
|
||||
pan_shader_postprocess(b.shader, inputs.gpu_id);
|
||||
pan_preprocess_nir(b.shader, inputs.gpu_id);
|
||||
pan_postprocess_nir(b.shader, inputs.gpu_id);
|
||||
|
||||
VkResult result = panvk_per_arch(create_internal_shader)(
|
||||
dev, b.shader, &inputs, &shader);
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#error "PAN_ARCH must be defined"
|
||||
#endif
|
||||
|
||||
#include "compiler/pan_compiler.h"
|
||||
#include "compiler/pan_ir.h"
|
||||
|
||||
#include "pan_desc.h"
|
||||
|
||||
@@ -98,8 +98,8 @@ get_blend_shader(struct panvk_device *dev,
|
||||
},
|
||||
};
|
||||
|
||||
pan_shader_preprocess(nir, inputs.gpu_id);
|
||||
pan_shader_postprocess(nir, inputs.gpu_id);
|
||||
pan_preprocess_nir(nir, inputs.gpu_id);
|
||||
pan_postprocess_nir(nir, inputs.gpu_id);
|
||||
|
||||
enum pipe_format rt_formats[8] = {0};
|
||||
rt_formats[rt] = key.info.format;
|
||||
|
||||
@@ -71,7 +71,7 @@ static nir_shader *
|
||||
get_preload_nir_shader(const struct panvk_fb_preload_shader_key *key)
|
||||
{
|
||||
nir_builder builder = nir_builder_init_simple_shader(
|
||||
MESA_SHADER_FRAGMENT, pan_shader_get_compiler_options(PAN_ARCH),
|
||||
MESA_SHADER_FRAGMENT, pan_get_nir_shader_compiler_options(PAN_ARCH),
|
||||
"panvk-meta-preload");
|
||||
nir_builder *b = &builder;
|
||||
nir_def *sample_id =
|
||||
@@ -153,10 +153,10 @@ get_preload_shader(struct panvk_device *dev,
|
||||
.is_blit = true,
|
||||
};
|
||||
|
||||
pan_shader_preprocess(nir, inputs.gpu_id);
|
||||
pan_shader_lower_texture_early(nir, inputs.gpu_id);
|
||||
pan_shader_postprocess(nir, inputs.gpu_id);
|
||||
pan_shader_lower_texture_late(nir, inputs.gpu_id);
|
||||
pan_preprocess_nir(nir, inputs.gpu_id);
|
||||
pan_nir_lower_texture_early(nir, inputs.gpu_id);
|
||||
pan_postprocess_nir(nir, inputs.gpu_id);
|
||||
pan_nir_lower_texture_late(nir, inputs.gpu_id);
|
||||
|
||||
VkResult result = panvk_per_arch(create_internal_shader)(
|
||||
dev, nir, &inputs, &shader);
|
||||
|
||||
@@ -369,7 +369,7 @@ panvk_get_nir_options(UNUSED struct vk_physical_device *vk_pdev,
|
||||
UNUSED const struct vk_pipeline_robustness_state *rs)
|
||||
{
|
||||
struct panvk_physical_device *phys_dev = to_panvk_physical_device(vk_pdev);
|
||||
return pan_shader_get_compiler_options(pan_arch(phys_dev->kmod.props.gpu_id));
|
||||
return pan_get_nir_shader_compiler_options(pan_arch(phys_dev->kmod.props.gpu_id));
|
||||
}
|
||||
|
||||
static struct spirv_to_nir_options
|
||||
@@ -428,17 +428,17 @@ panvk_preprocess_nir(struct vk_physical_device *vk_pdev,
|
||||
* 1. Compile SPIR-V to NIR and maybe do a tiny bit of lowering that needs
|
||||
* to be done really early.
|
||||
*
|
||||
* 2. pan_shader_preprocess: Does common lowering and runs the optimization
|
||||
* 2. pan_preprocess_nir: Does common lowering and runs the optimization
|
||||
* loop. Nothing here should be API-specific.
|
||||
*
|
||||
* 3. Do additional lowering in panvk
|
||||
*
|
||||
* 4. pan_shader_postprocess: Does final lowering and runs the optimization
|
||||
* 4. pan_postprocess_nir: Does final lowering and runs the optimization
|
||||
* loop again. This can happen as part of the final compile.
|
||||
*
|
||||
* This would give us a better place to do panvk-specific lowering.
|
||||
*/
|
||||
pan_shader_lower_texture_early(nir, pdev->kmod.props.gpu_id);
|
||||
pan_nir_lower_texture_early(nir, pdev->kmod.props.gpu_id);
|
||||
NIR_PASS(_, nir, nir_lower_system_values);
|
||||
|
||||
nir_lower_compute_system_values_options options = {
|
||||
@@ -450,7 +450,7 @@ panvk_preprocess_nir(struct vk_physical_device *vk_pdev,
|
||||
if (nir->info.stage == MESA_SHADER_FRAGMENT)
|
||||
NIR_PASS(_, nir, nir_lower_wpos_center);
|
||||
|
||||
pan_shader_optimize(nir, pdev->kmod.props.gpu_id);
|
||||
pan_optimize_nir(nir, pdev->kmod.props.gpu_id);
|
||||
|
||||
NIR_PASS(_, nir, nir_split_var_copies);
|
||||
NIR_PASS(_, nir, nir_lower_var_copies);
|
||||
@@ -909,7 +909,7 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
|
||||
nir_log_shaderi(nir);
|
||||
}
|
||||
|
||||
pan_shader_preprocess(nir, compile_input->gpu_id);
|
||||
pan_preprocess_nir(nir, compile_input->gpu_id);
|
||||
|
||||
/* Postprocess can add copies back in and lower_io can't handle them */
|
||||
NIR_PASS(_, nir, nir_lower_var_copies);
|
||||
@@ -918,8 +918,8 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
|
||||
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
|
||||
glsl_type_size, nir_lower_io_use_interpolated_input_intrinsics);
|
||||
|
||||
pan_shader_postprocess(nir, compile_input->gpu_id);
|
||||
pan_shader_lower_texture_late(nir, compile_input->gpu_id);
|
||||
pan_postprocess_nir(nir, compile_input->gpu_id);
|
||||
pan_nir_lower_texture_late(nir, compile_input->gpu_id);
|
||||
|
||||
if (stage == MESA_SHADER_VERTEX)
|
||||
NIR_PASS(_, nir, nir_shader_intrinsics_pass, panvk_lower_load_vs_input,
|
||||
|
||||
Reference in New Issue
Block a user