intel/ds: Fix crash when allocating more intel_ds_queues than u_vector was initialized

u_vector_add() does not keep previously returned pointers valid.
Once the initial size allocated in u_vector_init() is exceeded, it
allocates a bigger buffer, copies the data from the old buffer to the
new one, and frees the old buffer, invalidating all pointers
previously returned by u_vector_add() and crashing the application
when they are dereferenced.

This is reproduced when running
dEQP-VK.synchronization.signal_order.timeline_semaphore.* on DG2 SKUs
that have 4 CCS engines, with INTEL_COMPUTE_CLASS=1 set and, of course,
a perfetto-enabled build.

To fix this issue, move the storage/allocation of
struct intel_ds_queue into struct anv_queue/iris_batch and use a
struct list_head to maintain the chain of intel_ds_queue entries
belonging to the intel_ds_device.
This also allows queues to be appended or removed dynamically in the
future if necessary.

Fixes: e760c5b37b ("anv: add perfetto source")
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20977>
This commit is contained in:
José Roberto de Souza
2023-01-23 12:09:56 -08:00
committed by Marge Bot
parent 1b3c746eec
commit 8092bc2158
11 changed files with 38 additions and 39 deletions
+3 -3
View File
@@ -1048,10 +1048,10 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
}
uint64_t start_ts = intel_ds_begin_submit(batch->ds);
uint64_t submission_id = batch->ds->submission_id;
uint64_t start_ts = intel_ds_begin_submit(&batch->ds);
uint64_t submission_id = batch->ds.submission_id;
int ret = submit_batch(batch);
intel_ds_end_submit(batch->ds, start_ts);
intel_ds_end_submit(&batch->ds, start_ts);
/* When batch submission fails, our end-of-batch syncobj remains
* unsignalled, and in fact is not even considered submitted.
+1 -1
View File
@@ -196,7 +196,7 @@ struct iris_batch {
struct u_trace trace;
/** Batch wrapper structure for perfetto */
struct intel_ds_queue *ds;
struct intel_ds_queue ds;
};
void iris_init_batches(struct iris_context *ice, int priority);
+3 -4
View File
@@ -95,7 +95,7 @@ iris_utrace_delete_flush_data(struct u_trace_context *utctx,
void iris_utrace_flush(struct iris_batch *batch, uint64_t submission_id)
{
struct intel_ds_flush_data *flush_data = malloc(sizeof(*flush_data));
intel_ds_flush_data_init(flush_data, batch->ds, submission_id);
intel_ds_flush_data_init(flush_data, &batch->ds, submission_id);
u_trace_flush(&batch->trace, flush_data, false);
}
@@ -122,9 +122,8 @@ void iris_utrace_init(struct iris_context *ice)
iris_utrace_delete_flush_data);
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
ice->batches[i].ds =
intel_ds_device_add_queue(&ice->ds, "%s",
iris_batch_name_to_string(i));
intel_ds_device_init_queue(&ice->ds, &ice->batches[i].ds, "%s",
iris_batch_name_to_string(i));
}
}
+9 -11
View File
@@ -191,13 +191,11 @@ static void
send_descriptors(IntelRenderpassDataSource::TraceContext &ctx,
struct intel_ds_device *device)
{
struct intel_ds_queue *queue;
PERFETTO_LOG("Sending renderstage descriptors");
device->event_id = 0;
device->current_app_event_iid = device->start_app_event_iids;
u_vector_foreach(queue, &device->queues) {
list_for_each_entry_safe(struct intel_ds_queue, queue, &device->queues, link) {
for (uint32_t s = 0; s < ARRAY_SIZE(queue->stages); s++) {
queue->stages[s].start_ns[0] = 0;
}
@@ -231,7 +229,7 @@ send_descriptors(IntelRenderpassDataSource::TraceContext &ctx,
}
/* Emit all the IID picked at device/queue creation. */
u_vector_foreach(queue, &device->queues) {
list_for_each_entry_safe(struct intel_ds_queue, queue, &device->queues, link) {
for (unsigned s = 0; s < INTEL_DS_QUEUE_STAGE_N_STAGES; s++) {
{
/* We put the stage number in there so that all rows are order
@@ -607,7 +605,7 @@ intel_ds_device_init(struct intel_ds_device *device,
device->info = *devinfo;
device->iid = get_iid();
device->api = api;
u_vector_init(&device->queues, 4, sizeof(struct intel_ds_queue));
list_inithead(&device->queues);
/* Reserve iids for the application generated events */
device->start_app_event_iids = 1ull << 32;
@@ -620,16 +618,14 @@ intel_ds_device_fini(struct intel_ds_device *device)
{
u_trace_context_fini(&device->trace_context);
_mesa_hash_table_destroy(device->app_events, NULL);
u_vector_finish(&device->queues);
}
struct intel_ds_queue *
intel_ds_device_add_queue(struct intel_ds_device *device,
const char *fmt_name,
...)
intel_ds_device_init_queue(struct intel_ds_device *device,
struct intel_ds_queue *queue,
const char *fmt_name,
...)
{
struct intel_ds_queue *queue =
(struct intel_ds_queue *) u_vector_add(&device->queues);
va_list ap;
memset(queue, 0, sizeof(*queue));
@@ -645,6 +641,8 @@ intel_ds_device_add_queue(struct intel_ds_device *device,
queue->stages[s].stage_iid = get_iid();
}
list_add(&queue->link, &device->queues);
return queue;
}
+8 -4
View File
@@ -124,7 +124,7 @@ struct intel_ds_device {
struct u_trace_context trace_context;
/* List of intel_ds_queue */
struct u_vector queues;
struct list_head queues;
};
struct intel_ds_stage {
@@ -145,6 +145,8 @@ struct intel_ds_stage {
};
struct intel_ds_queue {
struct list_head link;
/* Device this queue belongs to */
struct intel_ds_device *device;
@@ -178,9 +180,11 @@ void intel_ds_device_init(struct intel_ds_device *device,
enum intel_ds_api api);
void intel_ds_device_fini(struct intel_ds_device *device);
struct intel_ds_queue *intel_ds_device_add_queue(struct intel_ds_device *device,
const char *fmt_name,
...);
struct intel_ds_queue *
intel_ds_device_init_queue(struct intel_ds_device *device,
struct intel_ds_queue *queue,
const char *fmt_name,
...);
void intel_ds_flush_data_init(struct intel_ds_flush_data *data,
struct intel_ds_queue *queue,
+2 -2
View File
@@ -1341,14 +1341,14 @@ anv_queue_submit(struct vk_queue *vk_queue,
return VK_SUCCESS;
}
uint64_t start_ts = intel_ds_begin_submit(queue->ds);
uint64_t start_ts = intel_ds_begin_submit(&queue->ds);
pthread_mutex_lock(&device->mutex);
result = anv_queue_submit_locked(queue, submit);
/* Take submission ID under lock */
pthread_mutex_unlock(&device->mutex);
intel_ds_end_submit(queue->ds, start_ts);
intel_ds_end_submit(&queue->ds, start_ts);
return result;
}
+1 -1
View File
@@ -1055,7 +1055,7 @@ struct anv_queue {
/** Synchronization object for debug purposes (DEBUG_SYNC) */
struct vk_sync *sync;
struct intel_ds_queue * ds;
struct intel_ds_queue ds;
};
struct nir_xfb_info;
+4 -5
View File
@@ -114,7 +114,7 @@ anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
if (!flush)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
intel_ds_flush_data_init(&flush->ds, queue->ds, queue->ds->submission_id);
intel_ds_flush_data_init(&flush->ds, &queue->ds, queue->ds.submission_id);
result = vk_sync_create(&device->vk, &device->physical->sync_syncobj_type,
0, 0, &flush->sync);
@@ -288,10 +288,9 @@ anv_device_utrace_init(struct anv_device *device)
for (uint32_t q = 0; q < device->queue_count; q++) {
struct anv_queue *queue = &device->queues[q];
queue->ds =
intel_ds_device_add_queue(&device->ds, "%s%u",
intel_engines_class_to_string(queue->family->engine_class),
queue->vk.index_in_family);
intel_ds_device_init_queue(&device->ds, &queue->ds, "%s%u",
intel_engines_class_to_string(queue->family->engine_class),
queue->vk.index_in_family);
}
}
+2 -2
View File
@@ -2385,14 +2385,14 @@ anv_queue_submit(struct vk_queue *vk_queue,
return VK_SUCCESS;
}
uint64_t start_ts = intel_ds_begin_submit(queue->ds);
uint64_t start_ts = intel_ds_begin_submit(&queue->ds);
pthread_mutex_lock(&device->mutex);
result = anv_queue_submit_locked(queue, submit);
/* Take submission ID under lock */
pthread_mutex_unlock(&device->mutex);
intel_ds_end_submit(queue->ds, start_ts);
intel_ds_end_submit(&queue->ds, start_ts);
return result;
}
+1 -1
View File
@@ -972,7 +972,7 @@ struct anv_queue {
/** Synchronization object for debug purposes (DEBUG_SYNC) */
struct vk_sync *sync;
struct intel_ds_queue * ds;
struct intel_ds_queue ds;
};
struct nir_xfb_info;
+4 -5
View File
@@ -111,7 +111,7 @@ anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
if (!flush)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
intel_ds_flush_data_init(&flush->ds, queue->ds, queue->ds->submission_id);
intel_ds_flush_data_init(&flush->ds, &queue->ds, queue->ds.submission_id);
result = vk_sync_create(&device->vk, &device->physical->sync_syncobj_type,
0, 0, &flush->sync);
@@ -284,10 +284,9 @@ anv_device_utrace_init(struct anv_device *device)
for (uint32_t q = 0; q < device->queue_count; q++) {
struct anv_queue *queue = &device->queues[q];
queue->ds =
intel_ds_device_add_queue(&device->ds, "%s%u",
intel_engines_class_to_string(queue->family->engine_class),
queue->vk.index_in_family);
intel_ds_device_init_queue(&device->ds, &queue->ds, "%s%u",
intel_engines_class_to_string(queue->family->engine_class),
queue->vk.index_in_family);
}
}