asahi: enable virtgpu support
This updates our virtgpu support to match the upstream protocol, and enables it. It is marked for backport since the older Mesa releases already had virtgpu support, just gated off to allow for breaking wire-protocol changes (of which there was one at the last moment). Upstream virglrenderer MR assigned to Marge: https://gitlab.freedesktop.org/virgl/virglrenderer/-/merge_requests/1541 Backport-to: 25.1 Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36580>
This commit is contained in:
committed by
Marge Bot
parent
d7b17d4d9c
commit
20dab5f819
@@ -84,7 +84,7 @@ static const struct debug_named_value agx_debug_options[] = {
|
||||
void
|
||||
agx_bo_free(struct agx_device *dev, struct agx_bo *bo)
|
||||
{
|
||||
const uint64_t handle = bo->uapi_handle;
|
||||
const uint64_t handle = bo->handle;
|
||||
|
||||
if (bo->_map)
|
||||
munmap(bo->_map, bo->size);
|
||||
@@ -521,9 +521,6 @@ agx_open_device(void *memctx, struct agx_device *dev)
|
||||
dev->is_virtio = false;
|
||||
dev->ops = agx_device_drm_ops;
|
||||
} else if (!strcmp(version->name, "virtio_gpu")) {
|
||||
/* TODO: virtio wire protocol is not stable yet */
|
||||
return false;
|
||||
|
||||
dev->is_virtio = true;
|
||||
if (!agx_virtio_open_device(dev)) {
|
||||
fprintf(
|
||||
|
||||
@@ -68,6 +68,7 @@ struct nir_shader;
|
||||
struct agx_submit_virt {
|
||||
uint32_t extres_count;
|
||||
struct asahi_ccmd_submit_res *extres;
|
||||
uint32_t ring_idx;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
|
||||
@@ -124,7 +124,7 @@ agx_virtio_bo_bind(struct agx_device *dev, struct drm_asahi_gem_bind_op *ops,
|
||||
|
||||
*req = (struct asahi_ccmd_vm_bind_req){
|
||||
.hdr.cmd = ASAHI_CCMD_VM_BIND,
|
||||
.hdr.len = sizeof(struct asahi_ccmd_vm_bind_req),
|
||||
.hdr.len = req_len,
|
||||
.vm_id = dev->vm_id,
|
||||
.stride = sizeof(*ops),
|
||||
.count = count,
|
||||
@@ -215,12 +215,8 @@ agx_virtio_get_params(struct agx_device *dev, void *buf, size_t size)
|
||||
sizeof(struct asahi_ccmd_get_params_rsp) + size);
|
||||
|
||||
int ret = vdrm_send_req(vdrm, &req.hdr, true);
|
||||
if (!ret)
|
||||
return ret;
|
||||
|
||||
ret = rsp->ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
if (ret || rsp->ret)
|
||||
return ret ? ret : rsp->ret;
|
||||
|
||||
memcpy(buf, &rsp->payload, size);
|
||||
return size;
|
||||
@@ -265,8 +261,7 @@ agx_virtio_submit(struct agx_device *dev, struct drm_asahi_submit *submit,
|
||||
}
|
||||
|
||||
struct vdrm_execbuf_params p = {
|
||||
/* Signal the host we want to wait for the command to complete */
|
||||
.ring_idx = 1,
|
||||
.ring_idx = virt->ring_idx,
|
||||
.req = &req->hdr,
|
||||
.num_in_syncobjs = submit->in_sync_count,
|
||||
.in_syncobjs = vdrm_syncs,
|
||||
@@ -296,7 +291,7 @@ agx_virtio_open_device(struct agx_device *dev)
|
||||
{
|
||||
struct vdrm_device *vdrm;
|
||||
|
||||
vdrm = vdrm_device_connect(dev->fd, 2);
|
||||
vdrm = vdrm_device_connect(dev->fd, 4);
|
||||
if (!vdrm) {
|
||||
fprintf(stderr, "could not connect vdrm\n");
|
||||
return false;
|
||||
|
||||
@@ -267,9 +267,10 @@ max_commands_per_submit(struct hk_device *dev)
|
||||
}
|
||||
|
||||
static VkResult
|
||||
queue_submit_single(struct hk_device *dev, struct drm_asahi_submit *submit)
|
||||
queue_submit_single(struct hk_device *dev, struct drm_asahi_submit *submit,
|
||||
unsigned ring_idx)
|
||||
{
|
||||
struct agx_submit_virt virt = {0};
|
||||
struct agx_submit_virt virt = {.ring_idx = ring_idx};
|
||||
|
||||
if (dev->dev.is_virtio) {
|
||||
u_rwlock_rdlock(&dev->external_bos.lock);
|
||||
@@ -300,7 +301,7 @@ queue_submit_single(struct hk_device *dev, struct drm_asahi_submit *submit)
|
||||
*/
|
||||
static VkResult
|
||||
queue_submit_looped(struct hk_device *dev, struct drm_asahi_submit *submit,
|
||||
unsigned command_count)
|
||||
unsigned command_count, unsigned ring_idx)
|
||||
{
|
||||
uint8_t *cmdbuf = (uint8_t *)(uintptr_t)submit->cmdbuf;
|
||||
uint32_t offs = 0;
|
||||
@@ -373,7 +374,7 @@ queue_submit_looped(struct hk_device *dev, struct drm_asahi_submit *submit,
|
||||
.out_sync_count = has_out_syncs ? submit->out_sync_count : 0,
|
||||
};
|
||||
|
||||
VkResult result = queue_submit_single(dev, &submit_ioctl);
|
||||
VkResult result = queue_submit_single(dev, &submit_ioctl, ring_idx);
|
||||
if (result != VK_SUCCESS)
|
||||
return result;
|
||||
|
||||
@@ -886,10 +887,13 @@ queue_submit(struct hk_device *dev, struct hk_queue *queue,
|
||||
};
|
||||
|
||||
VkResult result;
|
||||
if (command_count <= max_commands_per_submit(dev))
|
||||
result = queue_submit_single(dev, &submit_ioctl);
|
||||
else
|
||||
result = queue_submit_looped(dev, &submit_ioctl, command_count);
|
||||
if (command_count <= max_commands_per_submit(dev)) {
|
||||
result =
|
||||
queue_submit_single(dev, &submit_ioctl, queue->drm.virt_ring_idx);
|
||||
} else {
|
||||
result = queue_submit_looped(dev, &submit_ioctl, command_count,
|
||||
queue->drm.virt_ring_idx);
|
||||
}
|
||||
|
||||
util_dynarray_fini(&payload);
|
||||
return result;
|
||||
@@ -979,6 +983,7 @@ hk_queue_init(struct hk_device *dev, struct hk_queue *queue,
|
||||
queue->vk.driver_submit = hk_queue_submit;
|
||||
|
||||
queue->drm.id = agx_create_command_queue(&dev->dev, drm_priority);
|
||||
queue->drm.virt_ring_idx = drm_priority + 1;
|
||||
|
||||
if (drmSyncobjCreate(dev->dev.fd, 0, &queue->drm.syncobj)) {
|
||||
mesa_loge("drmSyncobjCreate() failed %d\n", errno);
|
||||
|
||||
@@ -22,6 +22,9 @@ struct hk_queue {
|
||||
/* Timeline syncobj backing the queue */
|
||||
uint32_t syncobj;
|
||||
|
||||
/* Ring-idx used with virtgpu, equal to priority + 1 */
|
||||
uint32_t virt_ring_idx;
|
||||
|
||||
/* Current maximum timeline value for the queue's syncobj. If the
|
||||
* syncobj's value equals timeline_value, then all work is complete.
|
||||
*/
|
||||
|
||||
@@ -659,7 +659,7 @@ agx_batch_submit(struct agx_context *ctx, struct agx_batch *batch,
|
||||
|
||||
uint64_t wait_seqid = p_atomic_read(&screen->flush_wait_seqid);
|
||||
|
||||
struct agx_submit_virt virt = {0};
|
||||
struct agx_submit_virt virt = {.ring_idx = ctx->virt_ring_idx};
|
||||
|
||||
/* Elide syncing against our own queue */
|
||||
if (wait_seqid && wait_seqid == ctx->flush_my_seqid) {
|
||||
|
||||
@@ -1751,6 +1751,7 @@ agx_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
|
||||
*/
|
||||
|
||||
ctx->queue_id = agx_create_command_queue(agx_device(screen), priority);
|
||||
ctx->virt_ring_idx = priority + 1;
|
||||
|
||||
pctx->destroy = agx_destroy_context;
|
||||
pctx->flush = agx_flush;
|
||||
|
||||
@@ -634,7 +634,7 @@ struct agx_context {
|
||||
} batches;
|
||||
|
||||
/* Queue handle */
|
||||
uint32_t queue_id;
|
||||
uint32_t queue_id, virt_ring_idx;
|
||||
|
||||
struct agx_batch *batch;
|
||||
struct agx_bo *timestamps;
|
||||
|
||||
Reference in New Issue
Block a user