tu: C++-proof: do not goto over variable initialization

Jumping over a variable's initialization with goto is not permitted by C++.

Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21931>
This commit is contained in:
Danylo Piliaiev
2023-03-07 18:03:37 +01:00
committed by Marge Bot
parent af3e075f05
commit a8dc6fbf83
4 changed files with 55 additions and 44 deletions

View File

@@ -2136,6 +2136,10 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
if (!is_kgsl(physical_device->instance))
vk_device_set_drm_fd(&device->vk, device->fd);
struct tu6_global *global = NULL;
uint32_t global_size = sizeof(struct tu6_global);
struct vk_pipeline_cache_create_info pcc_info = { };
for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
const VkDeviceQueueCreateInfo *queue_create =
&pCreateInfo->pQueueCreateInfos[i];
@@ -2190,7 +2194,6 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;
uint32_t global_size = sizeof(struct tu6_global);
if (custom_border_colors)
global_size += TU_BORDER_COLOR_COUNT * sizeof(struct bcolor_entry);
@@ -2213,7 +2216,7 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
goto fail_global_bo_map;
}
struct tu6_global *global = device->global_bo->map;
global = device->global_bo->map;
tu_init_clear_blit_shaders(device);
global->predicate = 0;
global->vtx_stats_query_not_running = 1;
@@ -2237,7 +2240,6 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
goto fail_dynamic_rendering;
}
struct vk_pipeline_cache_create_info pcc_info = { };
device->mem_cache = vk_pipeline_cache_create(&device->vk, &pcc_info,
false);
if (!device->mem_cache) {

View File

@@ -410,6 +410,20 @@ tu_GetPhysicalDeviceFormatProperties2(
}
}
/* Zero out the caller's VkImageFormatProperties and report
 * VK_ERROR_FORMAT_NOT_SUPPORTED.  This helper replaces the former
 * "unsupported:" goto label in tu_get_image_format_properties (removed
 * further down in this diff), so error paths return directly instead of
 * jumping over variable initializations, which C++ forbids.
 */
static VkResult
tu_image_unsupported_format(VkImageFormatProperties *pImageFormatProperties)
{
*pImageFormatProperties = (VkImageFormatProperties) {
.maxExtent = { 0, 0, 0 },
.maxMipLevels = 0,
.maxArrayLayers = 0,
.sampleCounts = 0,
.maxResourceSize = 0,
};
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
static VkResult
tu_get_image_format_properties(
struct tu_physical_device *physical_device,
@@ -476,11 +490,11 @@ tu_get_image_format_properties(
}
if (format_feature_flags == 0)
goto unsupported;
return tu_image_unsupported_format(pImageFormatProperties);
if (info->type != VK_IMAGE_TYPE_2D &&
vk_format_is_depth_or_stencil(info->format))
goto unsupported;
return tu_image_unsupported_format(pImageFormatProperties);
switch (info->type) {
default:
@@ -542,26 +556,26 @@ tu_get_image_format_properties(
if (image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
goto unsupported;
return tu_image_unsupported_format(pImageFormatProperties);
}
}
if (image_usage & VK_IMAGE_USAGE_STORAGE_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
goto unsupported;
return tu_image_unsupported_format(pImageFormatProperties);
}
}
if (image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
if (!(format_feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
goto unsupported;
return tu_image_unsupported_format(pImageFormatProperties);
}
}
if (image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
if (!(format_feature_flags &
VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
goto unsupported;
return tu_image_unsupported_format(pImageFormatProperties);
}
}
@@ -581,16 +595,6 @@ tu_get_image_format_properties(
*p_feature_flags = format_feature_flags;
return VK_SUCCESS;
unsupported:
*pImageFormatProperties = (VkImageFormatProperties) {
.maxExtent = { 0, 0, 0 },
.maxMipLevels = 0,
.maxArrayLayers = 0,
.sampleCounts = 0,
.maxResourceSize = 0,
};
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
static VkResult

View File

@@ -304,8 +304,10 @@ tu_bo_init(struct tu_device *dev,
result = tu_allocate_kernel_iova(dev, gem_handle, &iova);
}
if (result != VK_SUCCESS)
goto fail_bo_list;
if (result != VK_SUCCESS) {
tu_gem_close(dev, gem_handle);
return result;
}
name = tu_debug_bos_add(dev, size, name);
@@ -319,8 +321,8 @@ tu_bo_init(struct tu_device *dev,
vk_realloc(&dev->vk.alloc, dev->bo_list, new_len * sizeof(*dev->bo_list),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!new_ptr) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail_bo_list;
tu_gem_close(dev, gem_handle);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
dev->bo_list = new_ptr;
@@ -347,10 +349,6 @@ tu_bo_init(struct tu_device *dev,
mtx_unlock(&dev->bo_mutex);
return VK_SUCCESS;
fail_bo_list:
tu_gem_close(dev, gem_handle);
return result;
}
/**
@@ -1183,6 +1181,7 @@ tu_knl_drm_msm_load(struct tu_instance *instance,
struct tu_physical_device **out)
{
VkResult result = VK_SUCCESS;
int ret;
/* Version 1.6 added SYNCOBJ support. */
const int min_version_major = 1;
@@ -1251,7 +1250,7 @@ tu_knl_drm_msm_load(struct tu_instance *instance,
*/
device->has_set_iova = false;
int ret = tu_drm_get_param(device, MSM_PARAM_FAULTS, &device->fault_count);
ret = tu_drm_get_param(device, MSM_PARAM_FAULTS, &device->fault_count);
if (ret != 0) {
result = vk_startup_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
"Failed to get initial fault count: %d", ret);

View File

@@ -3117,6 +3117,10 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
};
VkPipelineCreationFeedback stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
const bool executable_info =
builder->create_info->flags &
VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
int64_t pipeline_start = os_time_get_nano();
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
@@ -3138,7 +3142,16 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
};
}
/* Forward declare everything due to the goto usage */
nir_shader *nir[ARRAY_SIZE(stage_infos)] = { NULL };
nir_shader *post_link_nir[ARRAY_SIZE(nir)] = { NULL };
struct tu_shader *shaders[ARRAY_SIZE(nir)] = { NULL };
char *nir_initial_disasm[ARRAY_SIZE(stage_infos)] = { NULL };
struct ir3_shader_variant *safe_const_variants[ARRAY_SIZE(nir)] = { NULL };
struct tu_shader *last_shader = NULL;
uint32_t desc_sets = 0;
uint32_t safe_constlens = 0;
struct tu_shader_key keys[ARRAY_SIZE(stage_infos)] = { };
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
@@ -3189,11 +3202,6 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
memcpy(nir_sha1, pipeline_sha1, sizeof(pipeline_sha1));
nir_sha1[20] = 'N';
const bool executable_info = builder->create_info->flags &
VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
char *nir_initial_disasm[ARRAY_SIZE(stage_infos)] = { NULL };
if (!executable_info) {
bool cache_hit = false;
bool application_cache_hit = false;
@@ -3236,8 +3244,6 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
return VK_PIPELINE_COMPILE_REQUIRED;
}
struct tu_shader *shaders[ARRAY_SIZE(nir)] = { NULL };
for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
stage = (gl_shader_stage) (stage + 1)) {
const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
@@ -3306,7 +3312,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
goto fail;
}
uint32_t desc_sets = 0;
desc_sets = 0;
for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
stage = (gl_shader_stage) (stage + 1)) {
if (!nir[stage])
@@ -3355,7 +3361,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
if (nir[MESA_SHADER_TESS_CTRL] && !nir[MESA_SHADER_FRAGMENT])
ir3_key.tcs_store_primid = true;
struct tu_shader *last_shader = shaders[MESA_SHADER_GEOMETRY];
last_shader = shaders[MESA_SHADER_GEOMETRY];
if (!last_shader)
last_shader = shaders[MESA_SHADER_TESS_EVAL];
if (!last_shader)
@@ -3381,7 +3387,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
stage_feedbacks[stage].duration += os_time_get_nano() - stage_start;
}
uint32_t safe_constlens = ir3_trim_constlen(compiled_shaders->variants, compiler);
safe_constlens = ir3_trim_constlen(compiled_shaders->variants, compiler);
ir3_key.safe_constlen = true;
@@ -3428,9 +3434,6 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
done:;
struct ir3_shader_variant *safe_const_variants[ARRAY_SIZE(nir)] = { NULL };
nir_shader *post_link_nir[ARRAY_SIZE(nir)] = { NULL };
if (compiled_shaders) {
for (gl_shader_stage stage = MESA_SHADER_VERTEX;
stage < ARRAY_SIZE(nir); stage = (gl_shader_stage) (stage + 1)) {
@@ -5156,6 +5159,9 @@ tu_compute_pipeline_create(VkDevice device,
TU_FROM_HANDLE(tu_pipeline_layout, layout, pCreateInfo->layout);
const VkPipelineShaderStageCreateInfo *stage_info = &pCreateInfo->stage;
VkResult result;
struct ir3_shader_variant *v = NULL;
uint32_t additional_reserve_size = 0;
uint64_t shader_iova = 0;
cache = cache ? cache : dev->mem_cache;
@@ -5273,7 +5279,7 @@ tu_compute_pipeline_create(VkDevice device,
pipeline->active_desc_sets = compiled->active_desc_sets;
struct ir3_shader_variant *v = compiled->variants[MESA_SHADER_COMPUTE];
v = compiled->variants[MESA_SHADER_COMPUTE];
tu_pipeline_set_linkage(&pipeline->program.link[MESA_SHADER_COMPUTE],
&compiled->const_state[MESA_SHADER_COMPUTE], v);
@@ -5282,7 +5288,7 @@ tu_compute_pipeline_create(VkDevice device,
if (result != VK_SUCCESS)
goto fail;
uint64_t shader_iova = tu_upload_variant(pipeline, v);
shader_iova = tu_upload_variant(pipeline, v);
struct tu_pvtmem_config pvtmem;
tu_setup_pvtmem(dev, pipeline, &pvtmem, v->pvtmem_size, v->pvtmem_per_wave);
@@ -5293,7 +5299,7 @@ tu_compute_pipeline_create(VkDevice device,
pipeline->compute.subgroup_size = v->info.subgroup_size;
struct tu_cs prog_cs;
uint32_t additional_reserve_size = tu_xs_get_additional_cs_size_dwords(v);
additional_reserve_size = tu_xs_get_additional_cs_size_dwords(v);
tu_cs_begin_sub_stream(&pipeline->cs, 64 + additional_reserve_size, &prog_cs);
tu6_emit_cs_config(&prog_cs, v, &pvtmem, shader_iova);
pipeline->program.state = tu_cs_end_draw_state(&pipeline->cs, &prog_cs);