nvk/nvkmd: Track all memory objects by default

A single lock on allocate/free is nothing compared to the ioctls we're
already taking.  This ensures that we always track every live memory
object on the device.

Reviewed-by: Mel Henning <mhenning@darkrefraction.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36995>
This commit is contained in:
Faith Ekstrand
2025-06-13 14:31:20 -04:00
committed by Marge Bot
parent 7f95c07080
commit 7a50d7004b
3 changed files with 64 additions and 44 deletions

View File

@@ -239,14 +239,8 @@ nvk_bind_buffer_memory(struct nvk_device *dev,
{
VK_FROM_HANDLE(nvk_device_memory, mem, info->memory);
VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
const struct nvk_physical_device *pdev = nvk_device_physical(dev);
VkResult result = VK_SUCCESS;
if ((pdev->debug_flags & NVK_DEBUG_PUSH_DUMP) &&
(buffer->vk.usage & (VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR |
VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT)))
nvkmd_dev_track_mem(dev->nvkmd, mem->mem);
if (buffer->va != NULL) {
result = nvkmd_va_bind_mem(buffer->va, &buffer->vk.base,
0 /* va_offset */,

View File

@@ -8,26 +8,22 @@
#include <inttypes.h>
void
nvkmd_dev_track_mem(struct nvkmd_dev *dev,
struct nvkmd_mem *mem)
static void
nvkmd_dev_add_mem(struct nvkmd_dev *dev,
struct nvkmd_mem *mem)
{
if (mem->link.next == NULL) {
simple_mtx_lock(&dev->mems_mutex);
list_addtail(&mem->link, &dev->mems);
simple_mtx_unlock(&dev->mems_mutex);
}
simple_mtx_lock(&dev->mems_mutex);
list_addtail(&mem->link, &dev->mems);
simple_mtx_unlock(&dev->mems_mutex);
}
static void
nvkmd_dev_untrack_mem(struct nvkmd_dev *dev,
struct nvkmd_mem *mem)
nvkmd_dev_remove_mem(struct nvkmd_dev *dev,
struct nvkmd_mem *mem)
{
if (mem->link.next != NULL) {
simple_mtx_lock(&dev->mems_mutex);
list_del(&mem->link);
simple_mtx_unlock(&dev->mems_mutex);
}
simple_mtx_lock(&dev->mems_mutex);
list_del(&mem->link);
simple_mtx_unlock(&dev->mems_mutex);
}
static struct nvkmd_mem *
@@ -92,6 +88,38 @@ nvkmd_try_create_pdev_for_drm(struct _drmDevice *drm_device,
debug_flags, pdev_out);
}
/* Allocate a memory object through the backend's alloc_mem hook and, on
 * success, register it in the device-wide memory list so it can be found
 * later (e.g. by nvkmd_dev_lookup_mem_by_va).
 *
 * Returns the backend's VkResult; *mem_out is only valid on VK_SUCCESS.
 */
VkResult MUST_CHECK
nvkmd_dev_alloc_mem(struct nvkmd_dev *dev,
                    struct vk_object_base *log_obj,
                    uint64_t size_B, uint64_t align_B,
                    enum nvkmd_mem_flags flags,
                    struct nvkmd_mem **mem_out)
{
   const VkResult vk_err = dev->ops->alloc_mem(dev, log_obj, size_B,
                                               align_B, flags, mem_out);
   if (vk_err != VK_SUCCESS)
      return vk_err;

   nvkmd_dev_add_mem(dev, *mem_out);
   return VK_SUCCESS;
}
/* Allocate a tiled memory object (with the given PTE kind and tile mode)
 * through the backend's alloc_tiled_mem hook and, on success, register it
 * in the device-wide memory list.
 *
 * Returns the backend's VkResult; *mem_out is only valid on VK_SUCCESS.
 */
VkResult MUST_CHECK
nvkmd_dev_alloc_tiled_mem(struct nvkmd_dev *dev,
                          struct vk_object_base *log_obj,
                          uint64_t size_B, uint64_t align_B,
                          uint8_t pte_kind, uint16_t tile_mode,
                          enum nvkmd_mem_flags flags,
                          struct nvkmd_mem **mem_out)
{
   const VkResult vk_err =
      dev->ops->alloc_tiled_mem(dev, log_obj, size_B, align_B,
                                pte_kind, tile_mode, flags, mem_out);
   if (vk_err != VK_SUCCESS)
      return vk_err;

   nvkmd_dev_add_mem(dev, *mem_out);
   return VK_SUCCESS;
}
VkResult
nvkmd_dev_alloc_mapped_mem(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
@@ -120,6 +148,18 @@ nvkmd_dev_alloc_mapped_mem(struct nvkmd_dev *dev,
return VK_SUCCESS;
}
/* Import an external dma-buf FD as a memory object via the backend's
 * import_dma_buf hook and, on success, register it in the device-wide
 * memory list like any other allocation.
 *
 * Returns the backend's VkResult; *mem_out is only valid on VK_SUCCESS.
 */
VkResult MUST_CHECK
nvkmd_dev_import_dma_buf(struct nvkmd_dev *dev,
                         struct vk_object_base *log_obj,
                         int fd, struct nvkmd_mem **mem_out)
{
   const VkResult vk_err = dev->ops->import_dma_buf(dev, log_obj, fd,
                                                    mem_out);
   if (vk_err != VK_SUCCESS)
      return vk_err;

   nvkmd_dev_add_mem(dev, *mem_out);
   return VK_SUCCESS;
}
VkResult MUST_CHECK
nvkmd_dev_alloc_va(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
@@ -269,7 +309,7 @@ nvkmd_mem_unref(struct nvkmd_mem *mem)
if (mem->map != NULL)
mem->ops->unmap(mem, 0, mem->map);
nvkmd_dev_untrack_mem(mem->dev, mem);
nvkmd_dev_remove_mem(mem->dev, mem);
mem->ops->free(mem);
}

View File

@@ -424,27 +424,20 @@ nvkmd_dev_get_drm_fd(struct nvkmd_dev *dev)
return dev->ops->get_drm_fd(dev);
}
static inline VkResult MUST_CHECK
VkResult MUST_CHECK
nvkmd_dev_alloc_mem(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
uint64_t size_B, uint64_t align_B,
enum nvkmd_mem_flags flags,
struct nvkmd_mem **mem_out)
{
return dev->ops->alloc_mem(dev, log_obj, size_B, align_B, flags, mem_out);
}
struct nvkmd_mem **mem_out);
static inline VkResult MUST_CHECK
VkResult MUST_CHECK
nvkmd_dev_alloc_tiled_mem(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
uint64_t size_B, uint64_t align_B,
uint8_t pte_kind, uint16_t tile_mode,
enum nvkmd_mem_flags flags,
struct nvkmd_mem **mem_out)
{
return dev->ops->alloc_tiled_mem(dev, log_obj, size_B, align_B,
pte_kind, tile_mode, flags, mem_out);
}
struct nvkmd_mem **mem_out);
/* Implies NVKMD_MEM_CAN_MAP */
VkResult MUST_CHECK
@@ -455,23 +448,16 @@ nvkmd_dev_alloc_mapped_mem(struct nvkmd_dev *dev,
enum nvkmd_mem_map_flags map_flags,
struct nvkmd_mem **mem_out);
void
nvkmd_dev_track_mem(struct nvkmd_dev *dev,
struct nvkmd_mem *mem);
VkResult MUST_CHECK
nvkmd_dev_import_dma_buf(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
int fd, struct nvkmd_mem **mem_out);
struct nvkmd_mem *
nvkmd_dev_lookup_mem_by_va(struct nvkmd_dev *dev,
uint64_t addr,
uint64_t *offset_out);
static inline VkResult MUST_CHECK
nvkmd_dev_import_dma_buf(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
int fd, struct nvkmd_mem **mem_out)
{
return dev->ops->import_dma_buf(dev, log_obj, fd, mem_out);
}
VkResult MUST_CHECK
nvkmd_dev_alloc_va(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,