@@ -145,20 +145,30 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 }
 
 static void
-batch_reset_resources(struct fd_batch *batch)
+batch_reset_resources_locked(struct fd_batch *batch)
 {
 	struct set_entry *entry;
 
+	pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
 	set_foreach(batch->resources, entry) {
 		struct fd_resource *rsc = (struct fd_resource *)entry->key;
 		_mesa_set_remove(batch->resources, entry);
 		debug_assert(rsc->batch_mask & (1 << batch->idx));
 		rsc->batch_mask &= ~(1 << batch->idx);
 		if (rsc->write_batch == batch)
-			fd_batch_reference(&rsc->write_batch, NULL);
+			fd_batch_reference_locked(&rsc->write_batch, NULL);
 	}
 }
 
+static void
+batch_reset_resources(struct fd_batch *batch)
+{
+	pipe_mutex_lock(batch->ctx->screen->lock);
+	batch_reset_resources_locked(batch);
+	pipe_mutex_unlock(batch->ctx->screen->lock);
+}
+
 static void
 batch_reset(struct fd_batch *batch)
 {
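
The hunk above introduces the `_locked` convention the whole patch is built on: the real work moves into a `*_locked()` variant that asserts the caller already holds screen->lock, and the old name becomes a lock/call/unlock wrapper. Below is a minimal standalone model of that split, using plain pthreads rather than gallium's pipe_mutex helpers; all names in it are illustrative, not part of the patch.

#include <assert.h>
#include <errno.h>
#include <pthread.h>

struct screen {
	pthread_mutex_t lock;
	unsigned busy_mask;
};

/* probe-style "is it held" check, in the spirit of pipe_mutex_assert_locked() */
static void
assert_locked(pthread_mutex_t *mtx)
{
	int rc = pthread_mutex_trylock(mtx);
	assert(rc == EBUSY);                /* must already be locked */
	if (rc == 0)
		pthread_mutex_unlock(mtx);  /* undo the probe if NDEBUG removed the assert */
}

/* does the work; caller must hold screen->lock */
static void
reset_locked(struct screen *s, unsigned idx)
{
	assert_locked(&s->lock);
	s->busy_mask &= ~(1u << idx);
}

/* convenience wrapper for callers that do not hold the lock */
static void
reset(struct screen *s, unsigned idx)
{
	pthread_mutex_lock(&s->lock);
	reset_locked(s, idx);
	pthread_mutex_unlock(&s->lock);
}

The same naming rule shows up below as fd_batch_reference() vs. fd_batch_reference_locked().
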
@@ -183,12 +193,14 @@ fd_batch_reset(struct fd_batch *batch)
 void
 __fd_batch_destroy(struct fd_batch *batch)
 {
-	fd_bc_invalidate_batch(batch, true);
-
 	DBG("%p", batch);
 
 	util_copy_framebuffer_state(&batch->framebuffer, NULL);
 
+	pipe_mutex_lock(batch->ctx->screen->lock);
+	fd_bc_invalidate_batch(batch, true);
+	pipe_mutex_unlock(batch->ctx->screen->lock);
+
 	batch_fini(batch);
 
 	batch_reset_resources(batch);

@@ -267,7 +279,9 @@ batch_flush(struct fd_batch *batch)
 	if (batch == batch->ctx->batch) {
 		batch_reset(batch);
 	} else {
+		pipe_mutex_lock(batch->ctx->screen->lock);
 		fd_bc_invalidate_batch(batch, false);
+		pipe_mutex_unlock(batch->ctx->screen->lock);
 	}
 }

@@ -315,10 +329,12 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 	 */
 	if (batch_depends_on(dep, batch)) {
 		DBG("%p: flush forced on %p!", batch, dep);
+		pipe_mutex_unlock(batch->ctx->screen->lock);
 		fd_batch_flush(dep, false);
+		pipe_mutex_lock(batch->ctx->screen->lock);
 	} else {
 		struct fd_batch *other = NULL;
-		fd_batch_reference(&other, dep);
+		fd_batch_reference_locked(&other, dep);
 		batch->dependents_mask |= (1 << dep->idx);
 		DBG("%p: added dependency on %p", batch, dep);
 	}

@@ -327,6 +343,8 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 void
 fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
 {
+	pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
 	if (rsc->stencil)
 		fd_batch_resource_used(batch, rsc->stencil, write);
 
@@ -353,7 +371,7 @@ fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
 				fd_batch_reference_locked(&b, NULL);
 			}
 		}
-		fd_batch_reference(&rsc->write_batch, batch);
+		fd_batch_reference_locked(&rsc->write_batch, batch);
 	} else {
 		if (rsc->write_batch) {
 			batch_add_dep(batch, rsc->write_batch);
@@ -207,6 +207,17 @@ void fd_batch_check_size(struct fd_batch *batch);
 void __fd_batch_describe(char* buf, const struct fd_batch *batch);
 void __fd_batch_destroy(struct fd_batch *batch);
 
+/*
+ * NOTE the rule is, you need to hold the screen->lock when destroying
+ * a batch.. so either use fd_batch_reference() (which grabs the lock
+ * for you) if you don't hold the lock, or fd_batch_reference_locked()
+ * if you do hold the lock.
+ *
+ * WARNING the _locked() version can briefly drop the lock.  Without
+ * recursive mutexes, I'm not sure there is much else we can do (since
+ * __fd_batch_destroy() needs to unref resources)
+ */
+
 static inline void
 fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
 {

@@ -217,6 +228,33 @@ fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
 	*ptr = batch;
 }
 
+/* fwd-decl prototypes to untangle header dependency :-/ */
+static inline void fd_context_assert_locked(struct fd_context *ctx);
+static inline void fd_context_lock(struct fd_context *ctx);
+static inline void fd_context_unlock(struct fd_context *ctx);
+
+static inline void
+fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
+{
+	struct fd_batch *old_batch = *ptr;
+
+	if (old_batch)
+		fd_context_assert_locked(old_batch->ctx);
+	else if (batch)
+		fd_context_assert_locked(batch->ctx);
+
+	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
+			(debug_reference_descriptor)__fd_batch_describe)) {
+		struct fd_context *ctx = old_batch->ctx;
+		fd_context_unlock(ctx);
+		__fd_batch_destroy(old_batch);
+		fd_context_lock(ctx);
+	}
+	*ptr = batch;
+}
+
+#include "freedreno_context.h"
+
 static inline void
 fd_reset_wfi(struct fd_batch *batch)
 {
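
The NOTE/WARNING comment above is the heart of the patch: destroying a batch requires screen->lock, so the `_locked` unref has to drop the lock around __fd_batch_destroy(), which takes the lock itself for its own cleanup. A minimal standalone model of why the unlock/relock pair is unavoidable without recursive mutexes (pthreads; hypothetical names, not the mesa API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t screen_lock = PTHREAD_MUTEX_INITIALIZER;

struct batch { int refcnt; };

/* stands in for __fd_batch_destroy(): it locks internally, so the
 * caller must NOT hold screen_lock when calling it */
static void
batch_destroy(struct batch *b)
{
	pthread_mutex_lock(&screen_lock);
	/* ... drop cache entries, unref resources ... */
	pthread_mutex_unlock(&screen_lock);
	free(b);
}

/* model of fd_batch_reference_locked(&ptr, NULL): caller holds screen_lock */
static void
batch_unref_locked(struct batch **ptr)
{
	struct batch *old = *ptr;
	*ptr = NULL;
	if (old && --old->refcnt == 0) {
		pthread_mutex_unlock(&screen_lock);  /* the WARNING window */
		batch_destroy(old);
		pthread_mutex_lock(&screen_lock);
	}
}

Because the lock can vanish for a moment inside any `_locked` unref, the call sites below pin objects with a local reference rather than caching bare pointers across such calls.
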
@@ -130,16 +130,22 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
 	struct hash_entry *entry;
 	struct fd_batch *last_batch = NULL;
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	hash_table_foreach(cache->ht, entry) {
 		struct fd_batch *batch = NULL;
-		fd_batch_reference(&batch, (struct fd_batch *)entry->data);
+		fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
 		if (batch->ctx == ctx) {
+			pipe_mutex_unlock(ctx->screen->lock);
 			fd_batch_reference(&last_batch, batch);
 			fd_batch_flush(batch, false);
+			pipe_mutex_lock(ctx->screen->lock);
 		}
-		fd_batch_reference(&batch, NULL);
+		fd_batch_reference_locked(&batch, NULL);
 	}
 
+	pipe_mutex_unlock(ctx->screen->lock);
+
 	if (last_batch) {
 		fd_batch_sync(last_batch);
 		fd_batch_reference(&last_batch, NULL);
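
fd_bc_flush() above establishes the pattern for blocking calls made from inside the critical section: pin the batch with a local reference, drop screen->lock across fd_batch_flush() (which takes the lock itself), then re-take the lock and drop the pin. A compact standalone model of that shape (pthreads; illustrative names only):

#include <pthread.h>

static pthread_mutex_t screen_lock = PTHREAD_MUTEX_INITIALIZER;

struct batch { int refcnt; };

/* stand-in for fd_batch_flush(): blocking, does its own locking */
static void
flush(struct batch *b)
{
	(void)b;
	pthread_mutex_lock(&screen_lock);
	/* ... submit rendering ... */
	pthread_mutex_unlock(&screen_lock);
}

/* loop body: caller holds screen_lock on entry and again on exit */
static void
flush_one(struct batch *b)
{
	b->refcnt++;                    /* pin: b cannot be destroyed meanwhile */
	pthread_mutex_unlock(&screen_lock);
	flush(b);                       /* safe: lock not held across the call */
	pthread_mutex_lock(&screen_lock);
	b->refcnt--;                    /* unpin under the lock */
}

fd_bc_alloc_batch() below uses the same shape when it evicts the oldest batch ("we can drop lock temporarily here, since we hold a ref").
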
@@ -154,20 +160,27 @@ fd_bc_invalidate_context(struct fd_context *ctx)
 	struct fd_batch_cache *cache = &ctx->screen->batch_cache;
 	struct fd_batch *batch;
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	foreach_batch(batch, cache, cache->batch_mask) {
-		if (batch->ctx == ctx) {
-			fd_batch_reset(batch);
-			fd_batch_reference(&batch, NULL);
-		}
+		if (batch->ctx == ctx)
+			fd_batch_reference_locked(&batch, NULL);
 	}
 
+	pipe_mutex_unlock(ctx->screen->lock);
 }
 
 void
 fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
 {
+	if (!batch)
+		return;
+
 	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
 	struct key *key = (struct key *)batch->key;
 
+	pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
 	if (destroy) {
 		cache->batches[batch->idx] = NULL;
 		cache->batch_mask &= ~(1 << batch->idx);

@@ -194,7 +207,9 @@ void
 fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 {
 	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
 	struct fd_batch *batch;
 
+	pipe_mutex_lock(screen->lock);
+
 	if (destroy) {
 		foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {

@@ -203,13 +218,15 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 		}
 		rsc->batch_mask = 0;
 
-		fd_batch_reference(&rsc->write_batch, NULL);
+		fd_batch_reference_locked(&rsc->write_batch, NULL);
 	}
 
 	foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
 		fd_bc_invalidate_batch(batch, false);
 
 	rsc->bc_batch_mask = 0;
+
+	pipe_mutex_unlock(screen->lock);
 }
 
 struct fd_batch *
@@ -218,6 +235,8 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 	struct fd_batch *batch;
 	uint32_t idx;
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	while ((idx = ffs(~cache->batch_mask)) == 0) {
 #if 0
 		for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {

@@ -240,10 +259,16 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 					!cache->batches[i]->needs_flush)
 				continue;
 			if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
-				fd_batch_reference(&flush_batch, cache->batches[i]);
+				fd_batch_reference_locked(&flush_batch, cache->batches[i]);
 		}
 
+		/* we can drop lock temporarily here, since we hold a ref,
+		 * flush_batch won't disappear under us.
+		 */
+		pipe_mutex_unlock(ctx->screen->lock);
 		DBG("%p: too many batches! flush forced!", flush_batch);
 		fd_batch_flush(flush_batch, true);
+		pipe_mutex_lock(ctx->screen->lock);
 
 		/* While the resources get cleaned up automatically, the flush_batch
 		 * doesn't get removed from the dependencies of other batches, so

@@ -259,18 +284,18 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 			if (other->dependents_mask & (1 << flush_batch->idx)) {
 				other->dependents_mask &= ~(1 << flush_batch->idx);
 				struct fd_batch *ref = flush_batch;
-				fd_batch_reference(&ref, NULL);
+				fd_batch_reference_locked(&ref, NULL);
 			}
 		}
 
-		fd_batch_reference(&flush_batch, NULL);
+		fd_batch_reference_locked(&flush_batch, NULL);
 	}
 
 	idx--; /* bit zero returns 1 for ffs() */
 
 	batch = fd_batch_create(ctx);
 	if (!batch)
-		return NULL;
+		goto out;
 
 	batch->seqno = cache->cnt++;
 	batch->idx = idx;

@@ -279,6 +304,9 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 	debug_assert(cache->batches[idx] == NULL);
 	cache->batches[idx] = batch;
 
+out:
+	pipe_mutex_unlock(ctx->screen->lock);
+
 	return batch;
 }

@@ -312,6 +340,8 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
 	if (!batch)
 		return NULL;
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
 	batch->key = key;
 	batch->hash = hash;

@@ -321,6 +351,8 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
 		rsc->bc_batch_mask = (1 << batch->idx);
 	}
 
+	pipe_mutex_unlock(ctx->screen->lock);
+
 	return batch;
 }
@@ -29,7 +29,9 @@
 
 #include "pipe/p_state.h"
 
-#include "freedreno_batch.h"
+struct fd_resource;
+struct fd_batch;
 struct fd_context;
 
 struct hash_table;
@@ -283,6 +283,24 @@ fd_context(struct pipe_context *pctx)
 	return (struct fd_context *)pctx;
 }
 
+static inline void
+fd_context_assert_locked(struct fd_context *ctx)
+{
+	pipe_mutex_assert_locked(ctx->screen->lock);
+}
+
+static inline void
+fd_context_lock(struct fd_context *ctx)
+{
+	pipe_mutex_lock(ctx->screen->lock);
+}
+
+static inline void
+fd_context_unlock(struct fd_context *ctx)
+{
+	pipe_mutex_unlock(ctx->screen->lock);
+}
+
 static inline struct pipe_scissor_state *
 fd_context_get_scissor(struct fd_context *ctx)
 {
@@ -101,6 +101,8 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 	 * Figure out the buffers/features we need:
 	 */
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	if (fd_depth_enabled(ctx)) {
 		buffers |= FD_BUFFER_DEPTH;
 		resource_written(batch, pfb->zsbuf->texture);

@@ -161,6 +163,8 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 
 	resource_written(batch, batch->query_buf);
 
+	pipe_mutex_unlock(ctx->screen->lock);
+
 	batch->num_draws++;
 
 	prims = u_reduced_prims_for_vertices(info->mode, info->count);

@@ -249,6 +253,8 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 	batch->resolve |= buffers;
 	batch->needs_flush = true;
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	if (buffers & PIPE_CLEAR_COLOR)
 		for (i = 0; i < pfb->nr_cbufs; i++)
 			if (buffers & (PIPE_CLEAR_COLOR0 << i))

@@ -261,6 +267,8 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 
 	resource_written(batch, batch->query_buf);
 
+	pipe_mutex_unlock(ctx->screen->lock);
+
 	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
 		pfb->width, pfb->height, depth, stencil,
 		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
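
The draw and clear hunks above bracket all of the resource_read()/resource_written() bookkeeping for one operation in a single screen->lock critical section, matching the new pipe_mutex_assert_locked() in fd_batch_resource_used(). A sketch of that shape with stand-in types (pthreads; not the mesa API):

#include <pthread.h>

static pthread_mutex_t screen_lock = PTHREAD_MUTEX_INITIALIZER;

struct resource { unsigned pending_write; };

/* stand-in for fd_batch_resource_used(batch, rsc, true); assumes the
 * caller holds screen_lock, as the real helper now asserts */
static void
resource_written(struct resource *r)
{
	r->pending_write = 1;
}

/* every resource a draw touches is marked in one critical section */
static void
draw(struct resource **written, int n)
{
	pthread_mutex_lock(&screen_lock);
	for (int i = 0; i < n; i++)
		resource_written(written[i]);
	pthread_mutex_unlock(&screen_lock);
}

Marking the whole set under one lock acquisition keeps a concurrent flush from observing a draw's dependencies half-recorded.
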
@@ -179,6 +179,8 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
 	 */
 	fd_bc_invalidate_resource(rsc, false);
 
+	pipe_mutex_lock(ctx->screen->lock);
+
 	/* Swap the backing bo's, so shadow becomes the old buffer,
 	 * blit from shadow to new buffer.  From here on out, we
 	 * cannot fail.

@@ -210,6 +212,8 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
 	}
 	swap(rsc->batch_mask, shadow->batch_mask);
 
+	pipe_mutex_unlock(ctx->screen->lock);
+
 	struct pipe_blit_info blit = {0};
 	blit.dst.resource = prsc;
 	blit.dst.format = prsc->format;
@@ -487,10 +491,15 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 	 * to wait.
 	 */
 	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
-		if ((usage & PIPE_TRANSFER_WRITE) && rsc->write_batch &&
-				rsc->write_batch->back_blit) {
+		struct fd_batch *write_batch = NULL;
+
+		/* hold a reference, so it doesn't disappear under us: */
+		fd_batch_reference(&write_batch, rsc->write_batch);
+
+		if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
+				write_batch->back_blit) {
 			/* if only thing pending is a back-blit, we can discard it: */
-			fd_batch_reset(rsc->write_batch);
+			fd_batch_reset(write_batch);
 		}
 
 		/* If the GPU is writing to the resource, or if it is reading from the

@@ -527,11 +536,13 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 			}
 			assert(rsc->batch_mask == 0);
 		} else {
-			fd_batch_flush(rsc->write_batch, true);
+			fd_batch_flush(write_batch, true);
 		}
 		assert(!rsc->write_batch);
 	}
 
+	fd_batch_reference(&write_batch, NULL);
+
 	/* The GPU keeps track of how the various bo's are being used, and
 	 * will wait if necessary for the proper operation to have
 	 * completed.
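
The transfer_map change above stops dereferencing rsc->write_batch directly once another thread can clear or replace it; instead it pins a snapshot into a local write_batch reference and works only through that. A standalone sketch of the snapshot-and-pin idiom (pthreads; illustrative types, not the mesa API):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t screen_lock = PTHREAD_MUTEX_INITIALIZER;

struct batch { int refcnt; };
struct resource { struct batch *write_batch; };

/* grab a stable reference to whoever is currently writing rsc; the
 * caller then uses (and finally unrefs) the returned pointer, never
 * rsc->write_batch itself */
static struct batch *
pin_write_batch(struct resource *rsc)
{
	pthread_mutex_lock(&screen_lock);
	struct batch *wb = rsc->write_batch;
	if (wb)
		wb->refcnt++;
	pthread_mutex_unlock(&screen_lock);
	return wb;
}

In the patch itself the pin is taken with fd_batch_reference(), which handles the locking; the point is the local copy, so checks like write_batch->back_blit and the final fd_batch_flush(write_batch, true) cannot race with the pointer being cleared.
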
@@ -138,6 +138,8 @@ fd_screen_destroy(struct pipe_screen *pscreen)
 
 	fd_bc_fini(&screen->batch_cache);
 
+	pipe_mutex_destroy(screen->lock);
+
 	free(screen);
 }

@@ -676,6 +678,8 @@ fd_screen_create(struct fd_device *dev)
 
 	fd_bc_init(&screen->batch_cache);
 
+	pipe_mutex_init(screen->lock);
+
 	pscreen->destroy = fd_screen_destroy;
 	pscreen->get_param = fd_screen_get_param;
 	pscreen->get_paramf = fd_screen_get_paramf;
@@ -34,6 +34,7 @@
 
 #include "pipe/p_screen.h"
 #include "util/u_memory.h"
+#include "os/os_thread.h"
 
 #include "freedreno_batch_cache.h"
 

@@ -42,6 +43,8 @@ struct fd_bo;
 struct fd_screen {
 	struct pipe_screen base;
 
+	pipe_mutex lock;
+
 	/* it would be tempting to use pipe_reference here, but that
 	 * really doesn't work well if it isn't the first member of
 	 * the struct, so not quite so awesome to be adding refcnting