freedreno: Add seqno helper

It is a pretty common pattern to allocate a non-zero sequence number for
lightweight checks of whether an object is unchanged, for use in cache
keys, etc.  (It is also pretty common to forget to handle the case where
the counter rolls over to zero.)  Add a helper for this.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21274>
This commit is contained in:
Rob Clark
2023-02-11 12:22:31 -08:00
committed by Marge Bot
parent 8f2b22ba66
commit 6747d30155
8 changed files with 39 additions and 11 deletions
+28
View File
@@ -24,6 +24,7 @@
#ifndef FREEDRENO_COMMON_H_
#define FREEDRENO_COMMON_H_
#include "util/u_atomic.h"
/**
* Helper macro to get around c++ being cranky about an enum that is a bitmask
@@ -56,4 +57,31 @@
#define BIT(bit) BITFIELD64_BIT(bit)
/**
* Helper for allocating sequence #s where zero is a non-valid seqno
*/
/* State for a monotonically-increasing sequence-number allocator.
 * Wraps a bare uint32_t so that callers go through the seqno_next*()
 * helpers (which skip the reserved "invalid" value 0) instead of
 * incrementing the counter directly.
 */
typedef struct {
uint32_t counter;  /* updated atomically via p_atomic_inc_return() */
} seqno_t;
/**
 * Allocate the next 32-bit sequence number.
 *
 * Atomically increments the counter; zero is reserved as the "invalid"
 * seqno, so when the counter wraps around to zero the increment is
 * simply retried until a non-zero value is produced.
 */
static inline uint32_t
seqno_next(seqno_t *seq)
{
   uint32_t val;
   while ((val = p_atomic_inc_return(&seq->counter)) == 0) {
      /* counter wrapped; 0 is reserved, take another increment */
   }
   return val;
}
/**
 * Allocate the next 16-bit sequence number.
 *
 * Shares the same 32-bit counter as seqno_next(); the result is the
 * low 16 bits of the counter.  Any increment whose low 16 bits are
 * zero is skipped, so callers can rely on 0 meaning "invalid"/unset.
 */
static inline uint16_t
seqno_next_u16(seqno_t *seq)
{
   uint16_t n;
   do {
      /* Explicit cast: the narrowing of the 32-bit counter to 16 bits
       * is intentional (and keeps -Wconversion quiet).
       */
      n = (uint16_t)p_atomic_inc_return(&seq->counter);
   } while (n == 0);
   return n;
}
#endif /* FREEDRENO_COMMON_H_ */
@@ -124,7 +124,7 @@ struct fd6_context {
struct hash_table *bcolor_cache;
struct fd_bo *bcolor_mem;
uint16_t tex_seqno;
seqno_t tex_seqno;
struct hash_table *tex_cache;
bool tex_cache_needs_invalidate;
@@ -263,7 +263,7 @@ fd6_sampler_state_create(struct pipe_context *pctx,
return NULL;
so->base = *cso;
so->seqno = ++fd6_context(ctx)->tex_seqno;
so->seqno = seqno_next_u16(&fd6_context(ctx)->tex_seqno);
if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR)
miplinear = true;
@@ -395,7 +395,7 @@ fd6_sampler_view_update(struct fd_context *ctx,
format = rsc->b.b.format;
}
so->seqno = ++fd6_context(ctx)->tex_seqno;
so->seqno = seqno_next_u16(&fd6_context(ctx)->tex_seqno);
so->ptr1 = rsc;
so->rsc_seqno = rsc->seqno;
@@ -430,7 +430,7 @@ alloc_batch_locked(struct fd_batch_cache *cache, struct fd_context *ctx,
if (!batch)
return NULL;
batch->seqno = cache->cnt++;
batch->seqno = seqno_next(&cache->cnt);
batch->idx = idx;
cache->batch_mask |= (1 << idx);
@@ -40,7 +40,7 @@ struct hash_table;
struct fd_batch_cache {
struct hash_table *ht;
unsigned cnt;
seqno_t cnt;
/* set of active batches.. there is an upper limit on the number of
* in-flight batches, for two reasons:
@@ -705,7 +705,7 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
list_inithead(&ctx->acc_active_queries);
fd_screen_lock(ctx->screen);
ctx->seqno = ++screen->ctx_seqno;
ctx->seqno = seqno_next_u16(&screen->ctx_seqno);
list_add(&ctx->node, &ctx->screen->context_list);
fd_screen_unlock(ctx->screen);
@@ -182,7 +182,7 @@ fd_resource_set_bo(struct fd_resource *rsc, struct fd_bo *bo)
struct fd_screen *screen = fd_screen(rsc->b.b.screen);
rsc->bo = bo;
rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
rsc->seqno = seqno_next_u16(&screen->rsc_seqno);
}
int
@@ -310,7 +310,7 @@ fd_replace_buffer_storage(struct pipe_context *pctx, struct pipe_resource *pdst,
fd_resource_tracking_reference(&dst->track, src->track);
src->is_replacement = true;
dst->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
dst->seqno = seqno_next_u16(&ctx->screen->rsc_seqno);
fd_screen_unlock(ctx->screen);
}
@@ -461,7 +461,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
rsc->needs_ubwc_clear = temp;
swap(rsc->layout, shadow->layout);
rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
rsc->seqno = seqno_next_u16(&ctx->screen->rsc_seqno);
/* at this point, the newly created shadow buffer is not referenced
* by any batches, but the existing rsc (probably) is. We need to
@@ -139,8 +139,8 @@ struct fd_screen {
bool reorder;
uint16_t rsc_seqno;
uint16_t ctx_seqno;
seqno_t rsc_seqno;
seqno_t ctx_seqno;
struct util_idalloc_mt buffer_ids;
unsigned num_supported_modifiers;