panvk: add bind queue

The bind queue is a queue for sparse resource memory bind operations,
i.e. it binds device memory to sparse resources. It doesn't map to any
particular kernel object. The queue is equipped with an internal
syncobj to implement PANVK_DEBUG=sync.
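
For context, a rough sketch of the application-side call this queue
services (minimal example; the mem/sparse_buf/bind_queue handles are
assumed to have been created elsewhere):

    VkSparseMemoryBind bind = {
       .resourceOffset = 0,
       .size = 64 * 1024,
       .memory = mem, /* VK_NULL_HANDLE would unbind the range */
       .memoryOffset = 0,
    };
    VkSparseBufferMemoryBindInfo buf_bind = {
       .buffer = sparse_buf,
       .bindCount = 1,
       .pBinds = &bind,
    };
    VkBindSparseInfo info = {
       .sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
       .bufferBindCount = 1,
       .pBufferBinds = &buf_bind,
    };
    vkQueueBindSparse(bind_queue, 1, &info, VK_NULL_HANDLE);

Binds with a VK_NULL_HANDLE memory are pointed at a "blackhole" BO
rather than being left unmapped.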

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35287>
Author:       Caterina Shablia
Date:         2025-06-18 18:07:12 +00:00
Committed by: Marge Bot
Parent:       86068ad1ee
Commit:       3b05d6a7b7

8 changed files with 559 additions and 31 deletions


@@ -92,4 +92,20 @@ VkResult panvk_per_arch(gpu_queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit);
VkResult panvk_per_arch(gpu_queue_check_status)(struct vk_queue *vk_queue);
struct panvk_bind_queue {
struct vk_queue vk;
uint32_t syncobj_handle;
};
VK_DEFINE_HANDLE_CASTS(panvk_bind_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)
VkResult panvk_per_arch(create_bind_queue)(
struct panvk_device *dev, const VkDeviceQueueCreateInfo *create_info,
uint32_t queue_idx, struct vk_queue **out_queue);
void panvk_per_arch(destroy_bind_queue)(struct vk_queue *vk_queue);
VkResult panvk_per_arch(bind_queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit);
VkResult panvk_per_arch(bind_queue_check_status)(struct vk_queue *vk_queue);
#endif


@@ -0,0 +1,429 @@
/*
* Copyright © 2024 Collabora Ltd.
*
* SPDX-License-Identifier: MIT
*/
#include "drm-uapi/panthor_drm.h"
#include "genxml/cs_builder.h"
#include "genxml/decode.h"
#include "panvk_buffer.h"
#include "panvk_cmd_buffer.h"
#include "panvk_device_memory.h"
#include "panvk_macros.h"
#include "panvk_queue.h"
#include "panvk_utrace.h"
#include "util/bitscan.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"
struct panvk_bind_queue_submit_sync_ops {
struct drm_panthor_sync_op *all;
size_t all_count;
struct drm_panthor_sync_op *waits;
size_t wait_count;
struct drm_panthor_sync_op *signals;
size_t signal_count;
struct drm_panthor_sync_op small_storage[4];
};
static void
panvk_bind_queue_submit_sync_ops_init(
struct panvk_bind_queue_submit_sync_ops *sync_ops,
const struct vk_queue_submit *vk_submit,
const struct drm_panthor_sync_op *extra_signal)
{
size_t signal_count = vk_submit->signal_count;
if (extra_signal)
signal_count++;
size_t all_count = vk_submit->wait_count + signal_count;
sync_ops->all = all_count <= ARRAY_SIZE(sync_ops->small_storage)
? sync_ops->small_storage
: malloc(sizeof(*sync_ops->all) * all_count);
sync_ops->all_count = all_count;
sync_ops->waits = sync_ops->all;
sync_ops->wait_count = vk_submit->wait_count;
sync_ops->signals = sync_ops->all + vk_submit->wait_count;
sync_ops->signal_count = signal_count;
for (uint32_t i = 0; i < vk_submit->wait_count; i++) {
const struct vk_sync_wait *wait = &vk_submit->waits[i];
const struct vk_drm_syncobj *syncobj = vk_sync_as_drm_syncobj(wait->sync);
assert(syncobj);
sync_ops->waits[i] = (struct drm_panthor_sync_op){
.flags = (syncobj->base.flags & VK_SYNC_IS_TIMELINE
? DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ
: DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ) |
DRM_PANTHOR_SYNC_OP_WAIT,
.handle = syncobj->syncobj,
.timeline_value = wait->wait_value,
};
}
uint32_t signal_idx = 0;
for (uint32_t i = 0; i < vk_submit->signal_count; i++) {
const struct vk_sync_signal *signal = &vk_submit->signals[i];
const struct vk_drm_syncobj *syncobj =
vk_sync_as_drm_syncobj(signal->sync);
assert(syncobj);
sync_ops->signals[signal_idx++] = (struct drm_panthor_sync_op){
.flags = (syncobj->base.flags & VK_SYNC_IS_TIMELINE
? DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ
: DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ) |
DRM_PANTHOR_SYNC_OP_SIGNAL,
.handle = syncobj->syncobj,
.timeline_value = signal->signal_value,
};
}
if (extra_signal)
sync_ops->signals[signal_idx++] = *extra_signal;
assert(signal_idx == signal_count);
}
static void
panvk_bind_queue_submit_sync_ops_cleanup(
struct panvk_bind_queue_submit_sync_ops *sync_ops)
{
if (sync_ops->all != sync_ops->small_storage)
free(sync_ops->all);
}
struct panvk_bind_queue_submit {
struct panvk_bind_queue *queue;
bool force_sync;
struct panvk_bind_queue_submit_sync_ops sync_ops;
struct drm_panthor_vm_bind_op *bind_ops;
size_t bind_op_count; /* number of bind ops buffered */
size_t bind_op_cap; /* size of the bind op buffer */
struct drm_panthor_vm_bind_op bind_op_small_storage[16];
};
static void
panvk_bind_queue_submit_init(struct panvk_bind_queue_submit *submit,
struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit)
{
struct panvk_bind_queue *queue =
container_of(vk_queue, struct panvk_bind_queue, vk);
struct panvk_instance *instance =
to_panvk_instance(queue->vk.base.device->physical->base.instance);
bool force_sync = instance->debug_flags & PANVK_DEBUG_SYNC;
*submit = (struct panvk_bind_queue_submit){
.queue = queue,
.force_sync = force_sync,
};
struct drm_panthor_sync_op syncobj_signal = {
.flags =
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ | DRM_PANTHOR_SYNC_OP_SIGNAL,
.handle = queue->syncobj_handle,
};
panvk_bind_queue_submit_sync_ops_init(&submit->sync_ops, vk_submit,
force_sync ? &syncobj_signal : NULL);
/* TODO: guess how many bind ops we'll need and allocate that much, unless
* it's too much
*/
uint32_t bind_op_cap = ARRAY_SIZE(submit->bind_op_small_storage);
submit->bind_ops = bind_op_cap <= ARRAY_SIZE(submit->bind_op_small_storage)
? submit->bind_op_small_storage
: malloc(sizeof(*submit->bind_ops) * bind_op_cap);
submit->bind_op_cap = bind_op_cap;
}
static void
panvk_bind_queue_submit_cleanup(struct panvk_bind_queue_submit *submit)
{
panvk_bind_queue_submit_sync_ops_cleanup(&submit->sync_ops);
if (submit->bind_ops != submit->bind_op_small_storage)
free(submit->bind_ops);
}
static int
panvk_bind_queue_submit_flush(struct panvk_bind_queue_submit *submit)
{
struct panvk_device *dev = to_panvk_device(submit->queue->vk.base.device);
if (submit->bind_op_count == 0)
return 0;
struct drm_panthor_vm_bind req = {
.vm_id = dev->kmod.vm->handle,
.flags = DRM_PANTHOR_VM_BIND_ASYNC,
.ops = DRM_PANTHOR_OBJ_ARRAY(submit->bind_op_count, submit->bind_ops),
};
int ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_VM_BIND, &req);
submit->bind_op_count = 0;
return ret;
}
static int
panvk_bind_queue_submit_vm_bind(
struct panvk_bind_queue_submit *submit,
const struct drm_panthor_vm_bind_op *op)
{
if (submit->bind_op_count == submit->bind_op_cap) {
int ret = panvk_bind_queue_submit_flush(submit);
if (ret)
return ret;
}
struct drm_panthor_vm_bind_op tmp = *op;
assert(!tmp.syncs.array);
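   /* Attach the submit's waits to the first bind op only; wait_count is
    * zeroed afterwards so subsequent ops don't wait again. */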
if (submit->sync_ops.wait_count > 0) {
tmp.syncs = (struct drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(
submit->sync_ops.wait_count, submit->sync_ops.waits);
submit->sync_ops.wait_count = 0;
}
assert(submit->bind_op_count < submit->bind_op_cap);
submit->bind_ops[submit->bind_op_count++] = tmp;
return 0;
}
static int
panvk_bind_queue_submit_process_signals(struct panvk_bind_queue_submit *submit)
{
struct panvk_bind_queue *queue = submit->queue;
struct panvk_device *device =
to_panvk_device(queue->vk.base.device);
struct drm_panthor_vm_bind_op tmp = {
.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY,
};
struct drm_panthor_vm_bind_op *op = submit->bind_op_count > 0
? &submit->bind_ops[submit->bind_op_count - 1]
: &tmp;
assert(!op->syncs.array ||
(op->syncs.array == (__u64)(uintptr_t)submit->sync_ops.waits &&
op->syncs.count > 0));
   /* If we
    * - have not consumed the waits yet (i.e. there were no bind ops at all), or
    * - the op we're about to attach the signals to already carries the waits,
    * then that op is given the full sync op array (waits + signals).
    */
op->syncs = submit->sync_ops.wait_count > 0 || op->syncs.array
? (struct drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(
submit->sync_ops.all_count, submit->sync_ops.all)
: (struct drm_panthor_obj_array)DRM_PANTHOR_OBJ_ARRAY(
submit->sync_ops.signal_count, submit->sync_ops.signals);
if (op == &tmp && op->syncs.count > 0) {
/* If we were filling out tmp, it wasn't in the bind op buffer. Now is a
* good time to plop it into there.
*/
assert(submit->bind_op_count < submit->bind_op_cap);
submit->bind_ops[submit->bind_op_count++] = tmp;
}
int ret = panvk_bind_queue_submit_flush(submit);
if (ret)
return ret;
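   /* PANVK_DEBUG=sync: the submit also signals the queue's internal syncobj
    * (added in panvk_bind_queue_submit_init), so waiting on it here makes
    * every bind submission effectively synchronous. */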
if (submit->force_sync) {
int ret = drmSyncobjWait(device->drm_fd, &queue->syncobj_handle, 1,
INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
NULL);
assert(!ret);
drmSyncobjReset(device->drm_fd, &queue->syncobj_handle, 1);
}
return 0;
}
static int
panvk_bind_queue_submit_map_to_blackhole(
struct panvk_bind_queue_submit *submit,
uint64_t base_va, uint64_t size)
{
struct panvk_device *dev = to_panvk_device(submit->queue->vk.base.device);
struct pan_kmod_bo *blackhole = panvk_get_blackhole(dev);
uint64_t blackhole_size = blackhole->size;
uint64_t off = 0;
   /* Map the blackhole BO repeatedly until the whole VA range is covered. */
   while (off < size) {
      uint64_t va = base_va + off;
      uint64_t bo_offset = va & (blackhole_size - 1);
      uint64_t va_range = MIN2(blackhole_size - bo_offset, size - off);
struct drm_panthor_vm_bind_op op = {
.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
.bo_handle = blackhole->handle,
.bo_offset = bo_offset,
.va = va,
.size = va_range,
};
int ret = panvk_bind_queue_submit_vm_bind(submit, &op);
      if (ret)
         return ret;
off += va_range;
}
return 0;
}
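/* Illustration (not from this commit): a worked trace of the loop above with
 * hypothetical values base_va=0x10000400, size=0x3000, blackhole_size=0x1000:
 *
 *   [0x10000400, 0x10001000) -> blackhole+0x400 (0xc00 bytes)
 *   [0x10001000, 0x10002000) -> blackhole+0x0   (0x1000 bytes)
 *   [0x10002000, 0x10003000) -> blackhole+0x0   (0x1000 bytes)
 *   [0x10003000, 0x10003400) -> blackhole+0x0   (0x400 bytes)
 *
 * Only the first chunk can start at a non-zero BO offset; every later chunk
 * is aligned to the (power-of-two) blackhole size.
 */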
static int
panvk_bind_queue_submit_sparse_memory_bind(
struct panvk_bind_queue_submit *submit,
VkDeviceAddress resource_va, const VkSparseMemoryBind *in)
{
VK_FROM_HANDLE(panvk_device_memory, mem, in->memory);
if (mem) {
struct drm_panthor_vm_bind_op op = {
.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
.bo_handle = mem->bo->handle,
.bo_offset = in->memoryOffset,
.va = resource_va + in->resourceOffset,
.size = in->size,
};
return panvk_bind_queue_submit_vm_bind(submit, &op);
} else {
      return panvk_bind_queue_submit_map_to_blackhole(
         submit, resource_va + in->resourceOffset, in->size);
}
}
static int
panvk_bind_queue_submit_do(struct panvk_bind_queue_submit *submit,
const struct vk_queue_submit *vk_submit)
{
int ret;
for (uint32_t i = 0; i < vk_submit->buffer_bind_count; i++) {
VK_FROM_HANDLE(panvk_buffer, buf, vk_submit->buffer_binds[i].buffer);
assert(buf->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT);
uint64_t resource_va = buf->vk.device_address;
for (uint32_t j = 0; j < vk_submit->buffer_binds[i].bindCount; j++) {
const VkSparseMemoryBind *in = &vk_submit->buffer_binds[i].pBinds[j];
ret = panvk_bind_queue_submit_sparse_memory_bind(submit, resource_va, in);
if (ret)
return ret;
}
}
for (uint32_t i = 0; i < vk_submit->image_opaque_bind_count; i++) {
VK_FROM_HANDLE(panvk_image, image,
vk_submit->image_opaque_binds[i].image);
assert(image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT);
uint64_t resource_va = image->sparse.device_address;
for (uint32_t j = 0; j < vk_submit->image_opaque_binds[i].bindCount;
j++) {
const VkSparseMemoryBind *in =
&vk_submit->image_opaque_binds[i].pBinds[j];
ret = panvk_bind_queue_submit_sparse_memory_bind(submit, resource_va, in);
if (ret)
return ret;
}
}
for (uint32_t i = 0; i < vk_submit->image_bind_count; i++) {
UNREACHABLE("not implemented");
}
return panvk_bind_queue_submit_process_signals(submit);
}
VkResult
panvk_per_arch(bind_queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit)
{
struct panvk_bind_queue_submit submit;
VkResult result = VK_SUCCESS;
if (vk_queue_is_lost(vk_queue))
return VK_ERROR_DEVICE_LOST;
panvk_bind_queue_submit_init(&submit, vk_queue, vk_submit);
int ret = panvk_bind_queue_submit_do(&submit, vk_submit);
if (ret)
      result = vk_queue_set_lost(vk_queue, "VM_BIND: %m");
panvk_bind_queue_submit_cleanup(&submit);
return result;
}
VkResult
panvk_per_arch(create_bind_queue)(struct panvk_device *dev,
const VkDeviceQueueCreateInfo *create_info,
uint32_t queue_idx,
struct vk_queue **out_queue)
{
struct panvk_bind_queue *queue = vk_zalloc(
&dev->vk.alloc, sizeof(*queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!queue)
return panvk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result =
vk_queue_init(&queue->vk, &dev->vk, create_info, queue_idx);
if (result != VK_SUCCESS)
goto err_free_queue;
int ret = drmSyncobjCreate(dev->drm_fd, 0, &queue->syncobj_handle);
if (ret) {
result = panvk_errorf(dev, VK_ERROR_INITIALIZATION_FAILED,
"Failed to create our internal sync object");
goto err_finish_queue;
}
queue->vk.driver_submit = panvk_per_arch(bind_queue_submit);
*out_queue = &queue->vk;
return VK_SUCCESS;
err_finish_queue:
vk_queue_finish(&queue->vk);
err_free_queue:
vk_free(&dev->vk.alloc, queue);
return result;
}
void
panvk_per_arch(destroy_bind_queue)(struct vk_queue *vk_queue)
{
struct panvk_bind_queue *queue =
container_of(vk_queue, struct panvk_bind_queue, vk);
struct panvk_device *dev = to_panvk_device(queue->vk.base.device);
drmSyncobjDestroy(dev->drm_fd, queue->syncobj_handle);
vk_queue_finish(&queue->vk);
vk_free(&dev->vk.alloc, queue);
}
VkResult
panvk_per_arch(bind_queue_check_status)(struct vk_queue *vk_queue)
{
return VK_SUCCESS;
}


@@ -31,4 +31,12 @@ VkResult panvk_per_arch(gpu_queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit);
VkResult panvk_per_arch(gpu_queue_check_status)(struct vk_queue *vk_queue);
VkResult panvk_per_arch(create_bind_queue)(
struct panvk_device *device, const VkDeviceQueueCreateInfo *create_info,
uint32_t queue_idx, struct vk_queue **out_queue);
void panvk_per_arch(destroy_bind_queue)(struct vk_queue *vk_queue);
VkResult panvk_per_arch(bind_queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit);
VkResult panvk_per_arch(bind_queue_check_status)(struct vk_queue *vk_queue);
#endif


@@ -0,0 +1,28 @@
#include "panvk_queue.h"
VkResult
panvk_per_arch(create_bind_queue)(
struct panvk_device *device, const VkDeviceQueueCreateInfo *create_info,
uint32_t queue_idx, struct vk_queue **out_queue)
{
return VK_ERROR_INITIALIZATION_FAILED;
}
void
panvk_per_arch(destroy_bind_queue)(struct vk_queue *vk_queue)
{
UNREACHABLE("");
}
VkResult
panvk_per_arch(bind_queue_submit)(struct vk_queue *vk_queue,
struct vk_queue_submit *vk_submit)
{
UNREACHABLE("");
}
VkResult
panvk_per_arch(bind_queue_check_status)(struct vk_queue *vk_queue)
{
UNREACHABLE("");
}


@@ -72,6 +72,7 @@ avalon_files = []
jm_archs = [6, 7]
jm_inc_dir = ['jm']
jm_files = [
'jm/panvk_vX_bind_queue.c',
'jm/panvk_vX_cmd_buffer.c',
'jm/panvk_vX_cmd_dispatch.c',
'jm/panvk_vX_cmd_draw.c',
@@ -85,6 +86,7 @@ jm_files = [
csf_archs = [10, 12, 13]
csf_inc_dir = ['csf']
csf_files = [
'csf/panvk_vX_bind_queue.c',
'csf/panvk_vX_cmd_buffer.c',
'csf/panvk_vX_cmd_dispatch.c',
'csf/panvk_vX_cmd_draw.c',


@@ -45,6 +45,7 @@ struct panvk_device_draw_context;
enum panvk_queue_family {
PANVK_QUEUE_FAMILY_GPU,
PANVK_QUEUE_FAMILY_BIND,
PANVK_QUEUE_FAMILY_COUNT,
};


@@ -366,23 +366,32 @@ fail:
static void
panvk_fill_global_priority(const struct panvk_physical_device *physical_device,
                           uint32_t family_idx,
                           VkQueueFamilyGlobalPriorityPropertiesKHR *prio)
{
   uint32_t prio_idx = 0;

   switch (family_idx) {
   case PANVK_QUEUE_FAMILY_GPU: {
      enum pan_kmod_group_allow_priority_flags prio_mask =
         physical_device->kmod.props.allowed_group_priorities_mask;

      if (prio_mask & PAN_KMOD_GROUP_ALLOW_PRIORITY_LOW)
         prio->priorities[prio_idx++] = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR;
      if (prio_mask & PAN_KMOD_GROUP_ALLOW_PRIORITY_MEDIUM)
         prio->priorities[prio_idx++] = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
      if (prio_mask & PAN_KMOD_GROUP_ALLOW_PRIORITY_HIGH)
         prio->priorities[prio_idx++] = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR;
      if (prio_mask & PAN_KMOD_GROUP_ALLOW_PRIORITY_REALTIME)
         prio->priorities[prio_idx++] = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR;
      break;
   }
   case PANVK_QUEUE_FAMILY_BIND:
      prio->priorities[prio_idx++] = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
      break;
   default:
      UNREACHABLE("Unknown queue family");
   }

   prio->priorityCount = prio_idx;
}
@@ -397,11 +406,10 @@ panvk_GetPhysicalDeviceQueueFamilyProperties2(
pQueueFamilyPropertyCount);
unsigned arch = pan_arch(physical_device->kmod.props.gpu_id);
   const VkQueueFamilyProperties qfamily_props[PANVK_QUEUE_FAMILY_COUNT] = {
      [PANVK_QUEUE_FAMILY_GPU] = {
         .queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT |
                       VK_QUEUE_TRANSFER_BIT,
         /* On v10+ we can support up to 127 queues, but this causes timeouts
          * on some CTS tests. */
         .queueCount = arch >= 10 ? 2 : 1,
         .timestampValidBits =
            arch >= 10 && physical_device->kmod.props.gpu_can_query_timestamp
               ? 64
               : 0,
         .minImageTransferGranularity = {1, 1, 1},
      },
      [PANVK_QUEUE_FAMILY_BIND] = {
         .queueFlags = VK_QUEUE_SPARSE_BINDING_BIT,
         .queueCount = 1,
      },
   };

   for (uint32_t family = 0; family < ARRAY_SIZE(qfamily_props); family++) {
      if (family == PANVK_QUEUE_FAMILY_BIND &&
          !physical_device->vk.supported_features.sparseBinding)
         break;

      vk_outarray_append_typed(VkQueueFamilyProperties2, &out, p) {
         p->queueFamilyProperties = qfamily_props[family];

         VkQueueFamilyGlobalPriorityPropertiesKHR *prio =
            vk_find_struct(p->pNext, QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR);
         if (prio)
            panvk_fill_global_priority(physical_device, family, prio);
      }
   }
}


@@ -212,15 +212,29 @@ check_global_priority(const struct panvk_physical_device *phys_dev,
priority_info ? priority_info->globalPriority
: VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
   switch (create_info->queueFamilyIndex) {
   case PANVK_QUEUE_FAMILY_GPU: {
      enum pan_kmod_group_allow_priority_flags requested_prio =
         global_priority_to_group_allow_priority_flag(priority);
      enum pan_kmod_group_allow_priority_flags allowed_prio_mask =
         phys_dev->kmod.props.allowed_group_priorities_mask;

      if (requested_prio & allowed_prio_mask)
         return VK_SUCCESS;

      return VK_ERROR_NOT_PERMITTED_KHR;
   }
   case PANVK_QUEUE_FAMILY_BIND: {
      if (priority == VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR)
         return VK_SUCCESS;

      return VK_ERROR_NOT_PERMITTED_KHR;
   }
   default:
      UNREACHABLE("Unknown queue family");
   }
}
static VkResult
@@ -229,8 +243,10 @@ panvk_queue_check_status(struct vk_queue *queue)
switch (queue->queue_family_index) {
case PANVK_QUEUE_FAMILY_GPU:
return panvk_per_arch(gpu_queue_check_status)(queue);
case PANVK_QUEUE_FAMILY_BIND:
return panvk_per_arch(bind_queue_check_status)(queue);
default:
      UNREACHABLE("Unknown queue family");
}
}
@@ -280,6 +296,9 @@ panvk_queue_create(struct panvk_device *dev,
case PANVK_QUEUE_FAMILY_GPU:
return panvk_per_arch(create_gpu_queue)(
dev, create_info, queue_idx, out_queue);
case PANVK_QUEUE_FAMILY_BIND:
return panvk_per_arch(create_bind_queue)(
dev, create_info, queue_idx, out_queue);
default:
return panvk_error(dev, VK_ERROR_INITIALIZATION_FAILED);
}
@@ -292,6 +311,9 @@ panvk_queue_destroy(struct vk_queue *queue)
case PANVK_QUEUE_FAMILY_GPU:
panvk_per_arch(destroy_gpu_queue)(queue);
break;
case PANVK_QUEUE_FAMILY_BIND:
panvk_per_arch(destroy_bind_queue)(queue);
break;
default:
UNREACHABLE("Unknown queue family");
}