diff --git a/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c b/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c index 48a98b10b4b..a8cde2cd159 100644 --- a/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c +++ b/src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c @@ -505,7 +505,7 @@ radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device, c apply_layout_state state = { .gfx_level = pdev->info.gfx_level, .address32_hi = pdev->info.address32_hi, - .disable_aniso_single_level = device->instance->drirc.disable_aniso_single_level, + .disable_aniso_single_level = pdev->instance->drirc.disable_aniso_single_level, .has_image_load_dcc_bug = pdev->info.has_image_load_dcc_bug, .disable_tg4_trunc_coord = !pdev->info.conformant_trunc_coord && !device->disable_trunc_coord, .args = &stage->args, diff --git a/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c b/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c index 4f36f25c1a9..26b4aab5ba1 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c +++ b/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c @@ -711,7 +711,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device lower_rq_generate_intersection(&builder, index, intrinsic, vars); break; case nir_intrinsic_rq_initialize: - lower_rq_initialize(&builder, index, intrinsic, vars, device->instance); + lower_rq_initialize(&builder, index, intrinsic, vars, pdev->instance); break; case nir_intrinsic_rq_load: new_dest = lower_rq_load(device, &builder, index, intrinsic, vars); diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c index e9e5a4a8a7f..3427925bb8d 100644 --- a/src/amd/vulkan/radv_cmd_buffer.c +++ b/src/amd/vulkan/radv_cmd_buffer.c @@ -820,7 +820,7 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer, enum radv_cmd_flu radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0)); } - if (device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) { + if 
(pdev->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) { enum rgp_flush_bits sqtt_flush_bits = 0; assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_CS_PARTIAL_FLUSH)); @@ -3872,8 +3872,8 @@ lookup_vs_prolog(struct radv_cmd_buffer *cmd_buffer, const struct radv_shader *v assert(vs_shader->info.vs.dynamic_inputs); struct radv_device *device = radv_cmd_buffer_device(cmd_buffer); - const struct radv_vs_input_state *state = &cmd_buffer->state.dynamic_vs_input; const struct radv_physical_device *pdev = radv_device_physical(device); + const struct radv_vs_input_state *state = &cmd_buffer->state.dynamic_vs_input; unsigned num_attributes = util_last_bit(vs_shader->info.vs.vb_desc_usage_mask); uint32_t attribute_mask = BITFIELD_MASK(num_attributes); @@ -4186,10 +4186,11 @@ static void radv_emit_alpha_to_coverage_enable(struct radv_cmd_buffer *cmd_buffer) { struct radv_device *device = radv_cmd_buffer_device(cmd_buffer); + const struct radv_physical_device *pdev = radv_device_physical(device); const struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; unsigned db_alpha_to_mask = 0; - if (device->instance->debug_flags & RADV_DEBUG_NO_ATOC_DITHERING) { + if (pdev->instance->debug_flags & RADV_DEBUG_NO_ATOC_DITHERING) { db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(2) | S_028B70_ALPHA_TO_MASK_OFFSET1(2) | S_028B70_ALPHA_TO_MASK_OFFSET2(2) | S_028B70_ALPHA_TO_MASK_OFFSET3(2) | S_028B70_OFFSET_ROUND(0); @@ -6116,7 +6117,7 @@ radv_bind_descriptor_sets(struct radv_cmd_buffer *cmd_buffer, RADV_FROM_HANDLE(radv_pipeline_layout, layout, pBindDescriptorSetsInfo->layout); struct radv_device *device = radv_cmd_buffer_device(cmd_buffer); const struct radv_physical_device *pdev = radv_device_physical(device); - const bool no_dynamic_bounds = device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS; + const bool no_dynamic_bounds = pdev->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS; struct radv_descriptor_state *descriptors_state = 
radv_get_descriptors_state(cmd_buffer, bind_point); unsigned dyn_idx = 0; @@ -10693,7 +10694,7 @@ radv_trace_rays(struct radv_cmd_buffer *cmd_buffer, VkTraceRaysIndirectCommand2K struct radv_device *device = radv_cmd_buffer_device(cmd_buffer); const struct radv_physical_device *pdev = radv_device_physical(device); - if (device->instance->debug_flags & RADV_DEBUG_NO_RT) + if (pdev->instance->debug_flags & RADV_DEBUG_NO_RT) return; if (unlikely(device->rra_trace.ray_history_buffer)) diff --git a/src/amd/vulkan/radv_debug.c b/src/amd/vulkan/radv_debug.c index cb8cdb994da..070b93a5d67 100644 --- a/src/amd/vulkan/radv_debug.c +++ b/src/amd/vulkan/radv_debug.c @@ -470,6 +470,7 @@ static void radv_dump_queue_state(struct radv_queue *queue, const char *dump_dir, FILE *f) { struct radv_device *device = radv_queue_device(queue); + const struct radv_physical_device *pdev = radv_device_physical(device); enum amd_ip_type ring = radv_queue_ring(queue); struct radv_pipeline *pipeline; @@ -508,8 +509,7 @@ radv_dump_queue_state(struct radv_queue *queue, const char *dump_dir, FILE *f) MESA_SHADER_COMPUTE, dump_dir, f); } - if (!(device->instance->debug_flags & RADV_DEBUG_NO_UMR)) { - const struct radv_physical_device *pdev = radv_device_physical(device); + if (!(pdev->instance->debug_flags & RADV_DEBUG_NO_UMR)) { struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP]; enum amd_gfx_level gfx_level = pdev->info.gfx_level; unsigned num_waves = ac_get_wave_info(gfx_level, &pdev->info, waves); @@ -596,12 +596,13 @@ radv_dump_dmesg(FILE *f) void radv_dump_enabled_options(const struct radv_device *device, FILE *f) { + const struct radv_physical_device *pdev = radv_device_physical(device); uint64_t mask; - if (device->instance->debug_flags) { + if (pdev->instance->debug_flags) { fprintf(f, "Enabled debug options: "); - mask = device->instance->debug_flags; + mask = pdev->instance->debug_flags; while (mask) { int i = u_bit_scan64(&mask); fprintf(f, "%s, ", radv_get_debug_option_name(i)); @@ 
-609,10 +610,10 @@ radv_dump_enabled_options(const struct radv_device *device, FILE *f) fprintf(f, "\n"); } - if (device->instance->perftest_flags) { + if (pdev->instance->perftest_flags) { fprintf(f, "Enabled perftest options: "); - mask = device->instance->perftest_flags; + mask = pdev->instance->perftest_flags; while (mask) { int i = u_bit_scan64(&mask); fprintf(f, "%s, ", radv_get_perftest_option_name(i)); @@ -624,7 +625,8 @@ radv_dump_enabled_options(const struct radv_device *device, FILE *f) static void radv_dump_app_info(const struct radv_device *device, FILE *f) { - const struct radv_instance *instance = device->instance; + const struct radv_physical_device *pdev = radv_device_physical(device); + const struct radv_instance *instance = pdev->instance; fprintf(f, "Application name: %s\n", instance->vk.app_info.app_name); fprintf(f, "Application version: %d\n", instance->vk.app_info.app_version); @@ -812,11 +814,11 @@ radv_check_gpu_hangs(struct radv_queue *queue, const struct radv_winsys_submit_i radv_dump_queue_state(queue, dump_dir, f); break; case RADV_DEVICE_FAULT_CHUNK_UMR_WAVES: - if (!(device->instance->debug_flags & RADV_DEBUG_NO_UMR)) + if (!(pdev->instance->debug_flags & RADV_DEBUG_NO_UMR)) radv_dump_umr_waves(queue, f); break; case RADV_DEVICE_FAULT_CHUNK_UMR_RING: - if (!(device->instance->debug_flags & RADV_DEBUG_NO_UMR)) + if (!(pdev->instance->debug_flags & RADV_DEBUG_NO_UMR)) radv_dump_umr_ring(queue, f); break; case RADV_DEVICE_FAULT_CHUNK_REGISTERS: diff --git a/src/amd/vulkan/radv_debug.h b/src/amd/vulkan/radv_debug.h index b4f0776593e..40825b1b737 100644 --- a/src/amd/vulkan/radv_debug.h +++ b/src/amd/vulkan/radv_debug.h @@ -114,7 +114,8 @@ bool radv_vm_fault_occurred(struct radv_device *device, struct radv_winsys_gpuvm ALWAYS_INLINE static bool radv_device_fault_detection_enabled(const struct radv_device *device) { - return device->instance->debug_flags & RADV_DEBUG_HANG; + const struct radv_physical_device *pdev = 
radv_device_physical(device); + return pdev->instance->debug_flags & RADV_DEBUG_HANG; } #endif diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c index 7485bdc7164..49a1b35bda2 100644 --- a/src/amd/vulkan/radv_descriptor_set.c +++ b/src/amd/vulkan/radv_descriptor_set.c @@ -907,10 +907,12 @@ radv_create_descriptor_pool(struct radv_device *device, const VkDescriptorPoolCr } if (bo_size) { + const struct radv_physical_device *pdev = radv_device_physical(device); + if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT)) { enum radeon_bo_flag flags = RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT; - if (device->instance->drirc.zero_vram) + if (pdev->instance->drirc.zero_vram) flags |= RADEON_FLAG_ZERO_VRAM; VkResult result = radv_bo_create(device, bo_size, 32, RADEON_DOMAIN_VRAM, flags, RADV_BO_PRIORITY_DESCRIPTOR, diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index 6e4a8654384..510cb89dcb5 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -192,7 +192,7 @@ radv_device_init_vs_prologs(struct radv_device *device) return vk_error(pdev->instance, VK_ERROR_OUT_OF_HOST_MEMORY); /* don't pre-compile prologs if we want to print them */ - if (device->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS) + if (pdev->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS) return VK_SUCCESS; struct radv_vs_prolog_key key; @@ -603,10 +603,11 @@ capture_trace(VkQueue _queue) { RADV_FROM_HANDLE(radv_queue, queue, _queue); struct radv_device *device = radv_queue_device(queue); + const struct radv_physical_device *pdev = radv_device_physical(device); VkResult result = VK_SUCCESS; - if (device->instance->vk.trace_mode & RADV_TRACE_MODE_RRA) + if (pdev->instance->vk.trace_mode & RADV_TRACE_MODE_RRA) device->rra_trace.triggered = true; if (device->vk.memory_trace_data.is_enabled) { @@ -616,10 +617,10 @@ capture_trace(VkQueue _queue) 
simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx); } - if (device->instance->vk.trace_mode & RADV_TRACE_MODE_RGP) + if (pdev->instance->vk.trace_mode & RADV_TRACE_MODE_RGP) device->sqtt_triggered = true; - if (device->instance->vk.trace_mode & RADV_TRACE_MODE_CTX_ROLLS) { + if (pdev->instance->vk.trace_mode & RADV_TRACE_MODE_CTX_ROLLS) { char filename[2048]; time_t t = time(NULL); struct tm now = *localtime(&t); @@ -703,8 +704,6 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr device->vk.command_buffer_ops = &radv_cmd_buffer_ops; - device->instance = pdev->instance; - init_dispatch_tables(device, pdev); simple_mtx_init(&device->ctx_roll_mtx, mtx_plain); @@ -722,7 +721,7 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr * from the descriptor set anymore, so we have to use a global BO list. */ device->use_global_bo_list = - (device->instance->perftest_flags & RADV_PERFTEST_BO_LIST) || device->vk.enabled_features.bufferDeviceAddress || + (pdev->instance->perftest_flags & RADV_PERFTEST_BO_LIST) || device->vk.enabled_features.bufferDeviceAddress || device->vk.enabled_features.descriptorIndexing || device->vk.enabled_extensions.EXT_descriptor_indexing || device->vk.enabled_extensions.EXT_buffer_device_address || device->vk.enabled_extensions.KHR_buffer_device_address || @@ -739,7 +738,7 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr device->overallocation_disallowed = overallocation_disallowed; mtx_init(&device->overallocation_mutex, mtx_plain); - if (pdev->info.register_shadowing_required || device->instance->debug_flags & RADV_DEBUG_SHADOW_REGS) + if (pdev->info.register_shadowing_required || pdev->instance->debug_flags & RADV_DEBUG_SHADOW_REGS) device->uses_shadow_regs = true; /* Create one context per queue priority. 
*/ @@ -782,22 +781,22 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr } device->private_sdma_queue = VK_NULL_HANDLE; - device->shader_use_invisible_vram = (device->instance->perftest_flags & RADV_PERFTEST_DMA_SHADERS) && + device->shader_use_invisible_vram = (pdev->instance->perftest_flags & RADV_PERFTEST_DMA_SHADERS) && /* SDMA buffer copy is only implemented for GFX7+. */ pdev->info.gfx_level >= GFX7; result = radv_init_shader_upload_queue(device); if (result != VK_SUCCESS) goto fail; - device->pbb_allowed = pdev->info.gfx_level >= GFX9 && !(device->instance->debug_flags & RADV_DEBUG_NOBINNING); + device->pbb_allowed = pdev->info.gfx_level >= GFX9 && !(pdev->instance->debug_flags & RADV_DEBUG_NOBINNING); - device->disable_trunc_coord = device->instance->drirc.disable_trunc_coord; + device->disable_trunc_coord = pdev->instance->drirc.disable_trunc_coord; - if (device->instance->vk.app_info.engine_name && !strcmp(device->instance->vk.app_info.engine_name, "DXVK")) { + if (pdev->instance->vk.app_info.engine_name && !strcmp(pdev->instance->vk.app_info.engine_name, "DXVK")) { /* For DXVK 2.3.0 and older, use dualSrcBlend to determine if this is D3D9. */ bool is_d3d9 = !device->vk.enabled_features.dualSrcBlend; - if (device->instance->vk.app_info.engine_version > VK_MAKE_VERSION(2, 3, 0)) - is_d3d9 = device->instance->vk.app_info.app_version & 0x1; + if (pdev->instance->vk.app_info.engine_version > VK_MAKE_VERSION(2, 3, 0)) + is_d3d9 = pdev->instance->vk.app_info.app_version & 0x1; device->disable_trunc_coord &= !is_d3d9; } @@ -857,12 +856,12 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr /* Wait for idle after every draw/dispatch to identify the * first bad call. 
*/ - device->instance->debug_flags |= RADV_DEBUG_SYNC_SHADERS; + pdev->instance->debug_flags |= RADV_DEBUG_SYNC_SHADERS; radv_dump_enabled_options(device, stderr); } - if (device->instance->vk.trace_mode & RADV_TRACE_MODE_RGP) { + if (pdev->instance->vk.trace_mode & RADV_TRACE_MODE_RGP) { if (pdev->info.gfx_level < GFX8 || pdev->info.gfx_level > GFX11) { fprintf(stderr, "GPU hardware not supported: refer to " "the RGP documentation for the list of " @@ -879,10 +878,10 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr "radv: Thread trace support is enabled (initial buffer size: %u MiB, " "instruction timing: %s, cache counters: %s, queue events: %s).\n", device->sqtt.buffer_size / (1024 * 1024), radv_is_instruction_timing_enabled() ? "enabled" : "disabled", - radv_spm_trace_enabled(device->instance) ? "enabled" : "disabled", + radv_spm_trace_enabled(pdev->instance) ? "enabled" : "disabled", radv_sqtt_queue_events_enabled() ? "enabled" : "disabled"); - if (radv_spm_trace_enabled(device->instance)) { + if (radv_spm_trace_enabled(pdev->instance)) { if (pdev->info.gfx_level >= GFX10) { if (!radv_spm_init(device)) { result = VK_ERROR_INITIALIZATION_FAILED; @@ -981,7 +980,7 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr } } - if (!(device->instance->debug_flags & RADV_DEBUG_NO_IBS)) + if (!(pdev->instance->debug_flags & RADV_DEBUG_NO_IBS)) radv_create_gfx_config(device); struct vk_pipeline_cache_create_info info = {.weak_ref = true}; @@ -1016,7 +1015,7 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr } } - if ((device->instance->vk.trace_mode & RADV_TRACE_MODE_RRA) && radv_enable_rt(pdev, false)) { + if ((pdev->instance->vk.trace_mode & RADV_TRACE_MODE_RRA) && radv_enable_rt(pdev, false)) { result = radv_rra_trace_init(device); if (result != VK_SUCCESS) goto fail; @@ -1031,7 +1030,7 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr 
goto fail_cache; if (pdev->info.gfx_level == GFX11 && pdev->info.has_dedicated_vram && - device->instance->drirc.force_pstate_peak_gfx11_dgpu) { + pdev->instance->drirc.force_pstate_peak_gfx11_dgpu) { if (!radv_device_acquire_performance_counters(device)) fprintf(stderr, "radv: failed to set pstate to profile_peak.\n"); } @@ -1530,7 +1529,7 @@ radv_initialise_color_surface(struct radv_device *device, struct radv_color_buff } } - if (radv_image_has_cmask(iview->image) && !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS)) + if (radv_image_has_cmask(iview->image) && !(pdev->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS)) cb->cb_color_info |= S_028C70_FAST_CLEAR(1); if (radv_dcc_enabled(iview->image, iview->vk.base_mip_level) && !iview->disable_dcc_mrt && diff --git a/src/amd/vulkan/radv_device_memory.c b/src/amd/vulkan/radv_device_memory.c index 7cbe7300511..294d917f70a 100644 --- a/src/amd/vulkan/radv_device_memory.c +++ b/src/amd/vulkan/radv_device_memory.c @@ -223,7 +223,7 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc if (flags_info && flags_info->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) flags |= RADEON_FLAG_REPLAYABLE; - if (device->instance->drirc.zero_vram) + if (pdev->instance->drirc.zero_vram) flags |= RADEON_FLAG_ZERO_VRAM; if (device->overallocation_disallowed) { diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c index 722fc7da2ff..cac679595f9 100644 --- a/src/amd/vulkan/radv_image.c +++ b/src/amd/vulkan/radv_image.c @@ -141,7 +141,9 @@ radv_surface_has_scanout(struct radv_device *device, const struct radv_image_cre static bool radv_image_use_fast_clear_for_image_early(const struct radv_device *device, const struct radv_image *image) { - if (device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS) + const struct radv_physical_device *pdev = radv_device_physical(device); + + if (pdev->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS) return true; if 
(image->vk.samples <= 1 && image->vk.extent.width * image->vk.extent.height <= 512 * 512) { @@ -159,7 +161,9 @@ radv_image_use_fast_clear_for_image_early(const struct radv_device *device, cons static bool radv_image_use_fast_clear_for_image(const struct radv_device *device, const struct radv_image *image) { - if (device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS) + const struct radv_physical_device *pdev = radv_device_physical(device); + + if (pdev->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS) return true; return radv_image_use_fast_clear_for_image_early(device, image) && (image->exclusive || @@ -251,7 +255,7 @@ radv_use_dcc_for_image_early(struct radv_device *device, struct radv_image *imag const VkImageCompressionControlEXT *compression = vk_find_struct_const(pCreateInfo->pNext, IMAGE_COMPRESSION_CONTROL_EXT); - if (device->instance->debug_flags & RADV_DEBUG_NO_DCC || + if (pdev->instance->debug_flags & RADV_DEBUG_NO_DCC || (compression && compression->flags == VK_IMAGE_COMPRESSION_DISABLED_EXT)) { return false; } @@ -296,8 +300,7 @@ radv_use_dcc_for_image_early(struct radv_device *device, struct radv_image *imag } /* DCC MSAA can't work on GFX10.3 and earlier without FMASK. 
*/ - if (pCreateInfo->samples > 1 && pdev->info.gfx_level < GFX11 && - (device->instance->debug_flags & RADV_DEBUG_NO_FMASK)) + if (pCreateInfo->samples > 1 && pdev->info.gfx_level < GFX11 && (pdev->instance->debug_flags & RADV_DEBUG_NO_FMASK)) return false; return radv_are_formats_dcc_compatible(pdev, pCreateInfo->pNext, format, pCreateInfo->flags, sign_reinterpret); @@ -363,7 +366,7 @@ radv_use_fmask_for_image(const struct radv_device *device, const struct radv_ima return pdev->use_fmask && image->vk.samples > 1 && ((image->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) || - (device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS)); + (pdev->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS)); } static inline bool @@ -376,7 +379,7 @@ radv_use_htile_for_image(const struct radv_device *device, const struct radv_ima const VkImageCompressionControlEXT *compression = vk_find_struct_const(pCreateInfo->pNext, IMAGE_COMPRESSION_CONTROL_EXT); - if (device->instance->debug_flags & RADV_DEBUG_NO_HIZ || + if (pdev->instance->debug_flags & RADV_DEBUG_NO_HIZ || (compression && compression->flags == VK_IMAGE_COMPRESSION_DISABLED_EXT)) return false; @@ -394,7 +397,7 @@ radv_use_htile_for_image(const struct radv_device *device, const struct radv_ima * allowed with VRS attachments because we need HTILE on GFX10.3. 
*/ if (image->vk.extent.width * image->vk.extent.height < 8 * 8 && - !(device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS) && + !(pdev->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS) && !(gfx_level == GFX10_3 && device->vk.enabled_features.attachmentFragmentShadingRate)) return false; @@ -414,7 +417,7 @@ radv_use_tc_compat_cmask_for_image(struct radv_device *device, struct radv_image if (pdev->info.gfx_level == GFX9 && image->vk.samples > 2) return false; - if (device->instance->debug_flags & RADV_DEBUG_NO_TC_COMPAT_CMASK) + if (pdev->instance->debug_flags & RADV_DEBUG_NO_TC_COMPAT_CMASK) return false; /* TC-compat CMASK with storage images is supported on GFX10+. */ @@ -564,7 +567,7 @@ radv_patch_image_from_extra_info(struct radv_device *device, struct radv_image * if (radv_surface_has_scanout(device, create_info)) { image->planes[plane].surface.flags |= RADEON_SURF_SCANOUT; - if (device->instance->debug_flags & RADV_DEBUG_NO_DISPLAY_DCC) + if (pdev->instance->debug_flags & RADV_DEBUG_NO_DISPLAY_DCC) image->planes[plane].surface.flags |= RADEON_SURF_DISABLE_DCC; image_info->surf_index = NULL; @@ -773,7 +776,7 @@ radv_query_opaque_metadata(struct radv_device *device, struct radv_image *image, false, desc, NULL); ac_surface_compute_umd_metadata(&pdev->info, surface, image->vk.mip_levels, desc, &md->size_metadata, md->metadata, - device->instance->debug_flags & RADV_DEBUG_EXTRA_MD); + pdev->instance->debug_flags & RADV_DEBUG_EXTRA_MD); } void @@ -956,7 +959,7 @@ radv_image_can_fast_clear(const struct radv_device *device, const struct radv_im { const struct radv_physical_device *pdev = radv_device_physical(device); - if (device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS) + if (pdev->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS) return false; if (vk_format_is_color(image->vk.format)) { @@ -1357,7 +1360,7 @@ radv_image_create(VkDevice _device, const struct radv_image_create_info *create_ } } - if (device->instance->debug_flags & RADV_DEBUG_IMG) { 
+ if (pdev->instance->debug_flags & RADV_DEBUG_IMG) { radv_image_print_info(device, image); } @@ -1438,7 +1441,7 @@ radv_layout_is_htile_compressed(const struct radv_device *device, const struct r * the number of decompressions from/to GENERAL. */ if (radv_image_is_tc_compat_htile(image) && queue_mask & (1u << RADV_QUEUE_GENERAL) && - !device->instance->drirc.disable_tc_compat_htile_in_general) { + !pdev->instance->drirc.disable_tc_compat_htile_in_general) { return true; } else { return false; diff --git a/src/amd/vulkan/radv_image_view.c b/src/amd/vulkan/radv_image_view.c index e778cbcb8aa..e425408424b 100644 --- a/src/amd/vulkan/radv_image_view.c +++ b/src/amd/vulkan/radv_image_view.c @@ -505,7 +505,7 @@ gfx6_make_texture_descriptor(struct radv_device *device, struct radv_image *imag if (!(image->planes[0].surface.flags & RADEON_SURF_Z_OR_SBUFFER) && image->planes[0].surface.meta_offset) { state[6] = S_008F28_ALPHA_IS_ON_MSB(vi_alpha_is_on_msb(device, vk_format)); } else { - if (device->instance->drirc.disable_aniso_single_level) { + if (pdev->instance->drirc.disable_aniso_single_level) { /* The last dword is unused by hw. The shader uses it to clear * bits in the first dword of sampler state. 
*/ diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c index f51a30dcc1d..dbb929cba43 100644 --- a/src/amd/vulkan/radv_pipeline.c +++ b/src/amd/vulkan/radv_pipeline.c @@ -63,15 +63,17 @@ radv_shader_need_indirect_descriptor_sets(const struct radv_shader *shader) bool radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags2KHR flags) { + const struct radv_physical_device *pdev = radv_device_physical(device); return (flags & VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || - (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS) || device->keep_shader_info; + (pdev->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS) || device->keep_shader_info; } bool radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags2KHR flags) { + const struct radv_physical_device *pdev = radv_device_physical(device); return (flags & VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR) || - (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) || device->keep_shader_info; + (pdev->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) || device->keep_shader_info; } void @@ -147,6 +149,7 @@ struct radv_shader_stage_key radv_pipeline_get_shader_key(const struct radv_device *device, const VkPipelineShaderStageCreateInfo *stage, VkPipelineCreateFlags2KHR flags, const void *pNext) { + const struct radv_physical_device *pdev = radv_device_physical(device); gl_shader_stage s = vk_to_mesa_shader_stage(stage->stage); struct radv_shader_stage_key key = {0}; @@ -156,12 +159,12 @@ radv_pipeline_get_shader_key(const struct radv_device *device, const VkPipelineS key.optimisations_disabled = 1; if (stage->stage & RADV_GRAPHICS_STAGE_BITS) { - key.version = device->instance->drirc.override_graphics_shader_version; + key.version = pdev->instance->drirc.override_graphics_shader_version; } else if (stage->stage & RADV_RT_STAGE_BITS) { - key.version = 
device->instance->drirc.override_ray_tracing_shader_version; + key.version = pdev->instance->drirc.override_ray_tracing_shader_version; } else { assert(stage->stage == VK_SHADER_STAGE_COMPUTE_BIT); - key.version = device->instance->drirc.override_compute_shader_version; + key.version = pdev->instance->drirc.override_compute_shader_version; } const VkPipelineRobustnessCreateInfoEXT *pipeline_robust_info = @@ -525,7 +528,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat NIR_PASS(progress, stage->nir, nir_opt_load_store_vectorize, &vectorize_opts); if (progress) { NIR_PASS(_, stage->nir, nir_copy_prop); - NIR_PASS(_, stage->nir, nir_opt_shrink_stores, !device->instance->drirc.disable_shrink_image_store); + NIR_PASS(_, stage->nir, nir_opt_shrink_stores, !pdev->instance->drirc.disable_shrink_image_store); /* Gather info again, to update whether 8/16-bit are used. */ nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir)); @@ -736,9 +739,10 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat bool radv_shader_should_clear_lds(const struct radv_device *device, const nir_shader *shader) { + const struct radv_physical_device *pdev = radv_device_physical(device); return (shader->info.stage == MESA_SHADER_COMPUTE || shader->info.stage == MESA_SHADER_MESH || shader->info.stage == MESA_SHADER_TASK) && - shader->info.shared_size > 0 && device->instance->drirc.clear_lds; + shader->info.shared_size > 0 && pdev->instance->drirc.clear_lds; } static uint32_t diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c index 312e54bd5ee..1df890719dc 100644 --- a/src/amd/vulkan/radv_pipeline_cache.c +++ b/src/amd/vulkan/radv_pipeline_cache.c @@ -47,7 +47,7 @@ radv_is_cache_disabled(struct radv_device *device) /* Pipeline caches can be disabled with RADV_DEBUG=nocache, with MESA_GLSL_CACHE_DISABLE=1 and * when ACO_DEBUG is used. MESA_GLSL_CACHE_DISABLE is done elsewhere. 
*/ - return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE) || (pdev->use_llvm ? 0 : aco_get_codegen_flags()); + return (pdev->instance->debug_flags & RADV_DEBUG_NO_CACHE) || (pdev->use_llvm ? 0 : aco_get_codegen_flags()); } void diff --git a/src/amd/vulkan/radv_pipeline_graphics.c b/src/amd/vulkan/radv_pipeline_graphics.c index 578416ffbd5..aa3d3a71795 100644 --- a/src/amd/vulkan/radv_pipeline_graphics.c +++ b/src/amd/vulkan/radv_pipeline_graphics.c @@ -1737,7 +1737,7 @@ radv_generate_ps_epilog_key(const struct radv_device *device, const struct radv_ key.spi_shader_col_format = col_format; key.color_is_int8 = pdev->info.gfx_level < GFX8 ? is_int8 : 0; key.color_is_int10 = pdev->info.gfx_level < GFX8 ? is_int10 : 0; - key.enable_mrt_output_nan_fixup = device->instance->drirc.enable_mrt_output_nan_fixup ? is_float32 : 0; + key.enable_mrt_output_nan_fixup = pdev->instance->drirc.enable_mrt_output_nan_fixup ? is_float32 : 0; key.colors_written = state->colors_written; key.mrt0_is_dual_src = state->mrt0_is_dual_src; key.export_depth = state->export_depth; @@ -2037,7 +2037,7 @@ radv_fill_shader_info_ngg(struct radv_device *device, struct radv_shader_stage * } if ((last_vgt_stage && last_vgt_stage->nir->xfb_info) || - ((device->instance->debug_flags & RADV_DEBUG_NO_NGG_GS) && stages[MESA_SHADER_GEOMETRY].nir)) { + ((pdev->instance->debug_flags & RADV_DEBUG_NO_NGG_GS) && stages[MESA_SHADER_GEOMETRY].nir)) { /* NGG needs to be disabled on GFX10/GFX10.3 when: * - streamout is used because NGG streamout isn't supported * - NGG GS is explictly disabled to workaround performance issues @@ -2500,7 +2500,7 @@ radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cac struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary) { const struct radv_physical_device *pdev = radv_device_physical(device); - const bool nir_cache = device->instance->perftest_flags & RADV_PERFTEST_NIR_CACHE; + const bool nir_cache = 
pdev->instance->perftest_flags & RADV_PERFTEST_NIR_CACHE; for (unsigned s = 0; s < MESA_VULKAN_SHADER_STAGES; s++) { if (!stages[s].entrypoint) continue; @@ -2512,7 +2512,7 @@ radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cac struct radv_spirv_to_nir_options options = { .lower_view_index_to_zero = !gfx_state->has_multiview_view_index, .fix_dual_src_mrt1_export = - gfx_state->ps.epilog.mrt0_is_dual_src && device->instance->drirc.dual_color_blend_by_location, + gfx_state->ps.epilog.mrt0_is_dual_src && pdev->instance->drirc.dual_color_blend_by_location, }; blake3_hash key; @@ -2663,12 +2663,14 @@ static bool radv_should_compute_pipeline_hash(const struct radv_device *device, const struct radv_graphics_pipeline *pipeline, bool fast_linking_enabled) { + const struct radv_physical_device *pdev = radv_device_physical(device); + /* Skip computing the pipeline hash when GPL fast-linking is enabled because these shaders aren't * supposed to be cached and computing the hash is costly. Though, make sure it's always computed * when RGP is enabled, otherwise ISA isn't reported. 
*/ return !fast_linking_enabled || - ((device->instance->vk.trace_mode & RADV_TRACE_MODE_RGP) && pipeline->base.type == RADV_PIPELINE_GRAPHICS); + ((pdev->instance->vk.trace_mode & RADV_TRACE_MODE_RGP) && pipeline->base.type == RADV_PIPELINE_GRAPHICS); } static VkResult @@ -3732,7 +3734,7 @@ gfx103_pipeline_vrs_coarse_shading(const struct radv_device *device, const struc if (pdev->info.gfx_level != GFX10_3) return false; - if (device->instance->debug_flags & RADV_DEBUG_NO_VRS_FLAT_SHADING) + if (pdev->instance->debug_flags & RADV_DEBUG_NO_VRS_FLAT_SHADING) return false; if (ps && !ps->info.ps.allow_flat_shading) diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h index 1d177adcec5..11faa551bf7 100644 --- a/src/amd/vulkan/radv_private.h +++ b/src/amd/vulkan/radv_private.h @@ -1105,7 +1105,6 @@ void radv_device_associate_nir(struct radv_device *device, nir_shader *nir); struct radv_device { struct vk_device vk; - struct radv_instance *instance; struct radeon_winsys *ws; struct radv_layer_dispatch_tables layer_dispatch; diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c index addcc67571c..dc088d64933 100644 --- a/src/amd/vulkan/radv_query.c +++ b/src/amd/vulkan/radv_query.c @@ -1751,7 +1751,7 @@ radv_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPoo /* Workaround engines that forget to properly specify WAIT_BIT because some driver implicitly * synchronizes before query copy. 
*/ - if (device->instance->drirc.flush_before_query_copy) + if (pdev->instance->drirc.flush_before_query_copy) cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits; /* From the Vulkan spec 1.1.108: @@ -2607,7 +2607,7 @@ radv_CmdWriteTimestamp2(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 sta radv_cs_add_buffer(device->ws, cs, pool->bo); if (cmd_buffer->qf == RADV_QUEUE_TRANSFER) { - if (device->instance->drirc.flush_before_timestamp_write) { + if (pdev->instance->drirc.flush_before_timestamp_write) { radeon_check_space(device->ws, cmd_buffer->cs, 1); radeon_emit(cmd_buffer->cs, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); } @@ -2621,7 +2621,7 @@ radv_CmdWriteTimestamp2(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 sta return; } - if (device->instance->drirc.flush_before_timestamp_write) { + if (pdev->instance->drirc.flush_before_timestamp_write) { /* Make sure previously launched waves have finished */ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_CS_PARTIAL_FLUSH; } diff --git a/src/amd/vulkan/radv_sampler.c b/src/amd/vulkan/radv_sampler.c index ab1dd632e73..ffa988146fd 100644 --- a/src/amd/vulkan/radv_sampler.c +++ b/src/amd/vulkan/radv_sampler.c @@ -248,12 +248,12 @@ radv_init_sampler(struct radv_device *device, struct radv_sampler *sampler, cons if (pdev->info.gfx_level >= GFX10) { sampler->state[2] |= S_008F38_LOD_BIAS(radv_float_to_sfixed(CLAMP(pCreateInfo->mipLodBias, -32, 31), 8)) | - S_008F38_ANISO_OVERRIDE_GFX10(device->instance->drirc.disable_aniso_single_level); + S_008F38_ANISO_OVERRIDE_GFX10(pdev->instance->drirc.disable_aniso_single_level); } else { - sampler->state[2] |= S_008F38_LOD_BIAS(radv_float_to_sfixed(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) | - S_008F38_DISABLE_LSB_CEIL(pdev->info.gfx_level <= GFX8) | S_008F38_FILTER_PREC_FIX(1) | - S_008F38_ANISO_OVERRIDE_GFX8(device->instance->drirc.disable_aniso_single_level && - pdev->info.gfx_level >= GFX8); + sampler->state[2] |= + 
S_008F38_LOD_BIAS(radv_float_to_sfixed(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) | + S_008F38_DISABLE_LSB_CEIL(pdev->info.gfx_level <= GFX8) | S_008F38_FILTER_PREC_FIX(1) | + S_008F38_ANISO_OVERRIDE_GFX8(pdev->instance->drirc.disable_aniso_single_level && pdev->info.gfx_level >= GFX8); } if (pdev->info.gfx_level >= GFX11) { diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c index f310246d479..70023b50c99 100644 --- a/src/amd/vulkan/radv_shader.c +++ b/src/amd/vulkan/radv_shader.c @@ -108,10 +108,12 @@ is_meta_shader(nir_shader *nir) bool radv_can_dump_shader(struct radv_device *device, nir_shader *nir, bool meta_shader) { - if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) + const struct radv_physical_device *pdev = radv_device_physical(device); + + if (!(pdev->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) return false; - if ((is_meta_shader(nir) || meta_shader) && !(device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS)) + if ((is_meta_shader(nir) || meta_shader) && !(pdev->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS)) return false; return true; @@ -120,8 +122,10 @@ radv_can_dump_shader(struct radv_device *device, nir_shader *nir, bool meta_shad bool radv_can_dump_shader_stats(struct radv_device *device, nir_shader *nir) { + const struct radv_physical_device *pdev = radv_device_physical(device); + /* Only dump non-meta shader stats. 
*/ - return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS && !is_meta_shader(nir); + return pdev->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS && !is_meta_shader(nir); } void @@ -251,7 +255,8 @@ static void radv_spirv_nir_debug(void *private_data, enum nir_spirv_debug_level level, size_t spirv_offset, const char *message) { struct radv_shader_debug_data *debug_data = private_data; - struct radv_instance *instance = debug_data->device->instance; + const struct radv_physical_device *pdev = radv_device_physical(debug_data->device); + struct radv_instance *instance = pdev->instance; static const VkDebugReportFlagsEXT vk_flags[] = { [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT, @@ -269,7 +274,8 @@ static void radv_compiler_debug(void *private_data, enum aco_compiler_debug_level level, const char *message) { struct radv_shader_debug_data *debug_data = private_data; - struct radv_instance *instance = debug_data->device->instance; + const struct radv_physical_device *pdev = radv_device_physical(debug_data->device); + struct radv_instance *instance = pdev->instance; static const VkDebugReportFlagsEXT vk_flags[] = { [ACO_COMPILER_DEBUG_LEVEL_PERFWARN] = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, @@ -331,8 +337,8 @@ radv_shader_spirv_to_nir(struct radv_device *device, const struct radv_shader_st uint32_t *spirv = (uint32_t *)stage->spirv.data; assert(stage->spirv.size % 4 == 0); - bool dump_meta = device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS; - if ((device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV) && (!is_internal || dump_meta)) + bool dump_meta = pdev->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS; + if ((pdev->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV) && (!is_internal || dump_meta)) radv_print_spirv(stage->spirv.data, stage->spirv.size, stderr); uint32_t num_spec_entries = 0; @@ -626,7 +632,7 @@ radv_shader_spirv_to_nir(struct radv_device *device, const struct radv_shader_st }); NIR_PASS(_, 
nir, nir_lower_load_const_to_scalar); - NIR_PASS(_, nir, nir_opt_shrink_stores, !device->instance->drirc.disable_shrink_image_store); + NIR_PASS(_, nir, nir_opt_shrink_stores, !pdev->instance->drirc.disable_shrink_image_store); if (!stage->key.optimisations_disabled) radv_optimize_nir(nir, false); @@ -2437,10 +2443,10 @@ radv_fill_nir_compiler_options(struct radv_nir_compiler_options *options, struct options->wgp_mode = should_use_wgp; options->info = &pdev->info; options->dump_shader = can_dump_shader; - options->dump_preoptir = options->dump_shader && device->instance->debug_flags & RADV_DEBUG_PREOPTIR; + options->dump_preoptir = options->dump_shader && pdev->instance->debug_flags & RADV_DEBUG_PREOPTIR; options->record_ir = keep_shader_info; options->record_stats = keep_statistic_info; - options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR; + options->check_ir = pdev->instance->debug_flags & RADV_DEBUG_CHECKIR; options->enable_mrt_output_nan_fixup = gfx_state ? gfx_state->ps.epilog.enable_mrt_output_nan_fixup : false; } @@ -2621,9 +2627,8 @@ radv_create_rt_prolog(struct radv_device *device) struct radv_shader_args in_args = {0}; struct radv_shader_args out_args = {0}; struct radv_nir_compiler_options options = {0}; - radv_fill_nir_compiler_options(&options, device, NULL, false, - device->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS, false, - radv_device_fault_detection_enabled(device), false); + radv_fill_nir_compiler_options(&options, device, NULL, false, pdev->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS, + false, radv_device_fault_detection_enabled(device), false); struct radv_shader_info info = {0}; info.stage = MESA_SHADER_COMPUTE; info.loads_push_constants = true; @@ -2682,12 +2687,12 @@ done: struct radv_shader_part * radv_create_vs_prolog(struct radv_device *device, const struct radv_vs_prolog_key *key) { + const struct radv_physical_device *pdev = radv_device_physical(device); struct radv_shader_part *prolog; struct 
radv_shader_args args = {0}; struct radv_nir_compiler_options options = {0}; - radv_fill_nir_compiler_options(&options, device, NULL, false, - device->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS, false, - radv_device_fault_detection_enabled(device), false); + radv_fill_nir_compiler_options(&options, device, NULL, false, pdev->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS, + false, radv_device_fault_detection_enabled(device), false); struct radv_shader_info info = {0}; info.stage = MESA_SHADER_VERTEX; @@ -2753,9 +2758,8 @@ radv_create_ps_epilog(struct radv_device *device, const struct radv_ps_epilog_ke struct radv_shader_part *epilog; struct radv_shader_args args = {0}; struct radv_nir_compiler_options options = {0}; - radv_fill_nir_compiler_options(&options, device, NULL, false, - device->instance->debug_flags & RADV_DEBUG_DUMP_EPILOGS, false, - radv_device_fault_detection_enabled(device), false); + radv_fill_nir_compiler_options(&options, device, NULL, false, pdev->instance->debug_flags & RADV_DEBUG_DUMP_EPILOGS, + false, radv_device_fault_detection_enabled(device), false); struct radv_shader_info info = {0}; info.stage = MESA_SHADER_FRAGMENT; diff --git a/src/amd/vulkan/radv_sqtt.c b/src/amd/vulkan/radv_sqtt.c index b1e6c31d709..346c9847801 100644 --- a/src/amd/vulkan/radv_sqtt.c +++ b/src/amd/vulkan/radv_sqtt.c @@ -576,6 +576,7 @@ radv_sqtt_reset_timestamp(struct radv_device *device) static bool radv_sqtt_init_queue_event(struct radv_device *device) { + const struct radv_physical_device *pdev = radv_device_physical(device); VkCommandPool cmd_pool; VkResult result; @@ -590,7 +591,7 @@ radv_sqtt_init_queue_event(struct radv_device *device) device->sqtt_command_pool[0] = vk_command_pool_from_handle(cmd_pool); - if (!(device->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) { + if (!(pdev->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) { const VkCommandPoolCreateInfo create_comp_info = { .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, 
.queueFamilyIndex = RADV_QUEUE_COMPUTE, diff --git a/src/amd/vulkan/radv_video.c b/src/amd/vulkan/radv_video.c index d28f72c07a2..90a8b50923f 100644 --- a/src/amd/vulkan/radv_video.c +++ b/src/amd/vulkan/radv_video.c @@ -352,7 +352,7 @@ radv_CreateVideoSessionKHR(VkDevice _device, const VkVideoSessionCreateInfoKHR * struct radv_video_session *vid = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*vid), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!vid) - return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + return vk_error(pdev->instance, VK_ERROR_OUT_OF_HOST_MEMORY); memset(vid, 0, sizeof(struct radv_video_session)); @@ -415,10 +415,11 @@ radv_CreateVideoSessionParametersKHR(VkDevice _device, const VkVideoSessionParam RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_video_session, vid, pCreateInfo->videoSession); RADV_FROM_HANDLE(radv_video_session_params, templ, pCreateInfo->videoSessionParametersTemplate); + const struct radv_physical_device *pdev = radv_device_physical(device); struct radv_video_session_params *params = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*params), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (!params) - return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + return vk_error(pdev->instance, VK_ERROR_OUT_OF_HOST_MEMORY); VkResult result = vk_video_session_parameters_init(&device->vk, &params->vk, &vid->vk, templ ? &templ->vk : NULL, pCreateInfo);