Merge branch 'gallium-i915-current' into gallium-0.1

This commit is contained in:
Jakob Bornecrantz
2008-05-23 16:26:14 +02:00
60 changed files with 10834 additions and 744 deletions
+1
View File
@@ -108,6 +108,7 @@ linux-dri-x86 \
linux-dri-x86-64 \
linux-dri-ppc \
linux-dri-xcb \
linux-egl \
linux-indirect \
linux-fbdev \
linux-glide \
+3 -1
View File
@@ -59,8 +59,10 @@ SRC_DIRS := egl $(SRC_DIRS)
PROGRAM_DIRS = egl
endif
DRIVER_DIRS = dri
WINDOW_SYSTEM=dri
WINDOW_SYSTEM = dri
GALLIUM_WINSYS_DIRS = dri
# gamma are missing because they have not been converted to use the new
+65
View File
@@ -0,0 +1,65 @@
# -*-makefile-*-
# Configuration for linux-dri: Linux DRI hardware drivers for XFree86 & others
include $(TOP)/configs/default
CONFIG_NAME = linux-dri
# Compiler and flags
CC = gcc
CXX = g++
#MKDEP = /usr/X11R6/bin/makedepend
#MKDEP = gcc -M
#MKDEP_OPTIONS = -MF depend
OPT_FLAGS = -O -g
PIC_FLAGS = -fPIC
# Add '-DGLX_USE_TLS' to ARCH_FLAGS to enable TLS support.
ARCH_FLAGS ?=
DEFINES = -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE \
-D_BSD_SOURCE -D_GNU_SOURCE \
-DPTHREADS -DUSE_EXTERNAL_DXTN_LIB=1 -DIN_DRI_DRIVER \
-DGLX_DIRECT_RENDERING -DGLX_INDIRECT_RENDERING \
-DHAVE_ALIAS -DHAVE_POSIX_MEMALIGN
X11_INCLUDES = -I/usr/X11R6/include
CFLAGS = -Wall -Wmissing-prototypes -std=c99 -ffast-math \
$(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES) $(ASM_FLAGS)
CXXFLAGS = -Wall $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES)
MESA_ASM_SOURCES =
# Library/program dependencies
EXTRA_LIB_PATH=-L/usr/X11R6/lib
LIBDRM_CFLAGS = $(shell pkg-config --cflags libdrm)
LIBDRM_LIB = $(shell pkg-config --libs libdrm)
DRI_LIB_DEPS = $(EXTRA_LIB_PATH) -lm -lpthread -lexpat -ldl $(LIBDRM_LIB)
GL_LIB_DEPS = $(EXTRA_LIB_PATH) -lX11 -lXext -lXxf86vm -lXdamage -lXfixes \
-lm -lpthread -ldl \
$(LIBDRM_LIB)
# This is now 0 by default since it seems to confuse the hell out of people
# and generate a lot of extra noise on bugzilla. If you need to build with
# EGL, do 'make linux-dri USING_EGL=1'
USING_EGL=0
# Directories
SRC_DIRS = gallium mesa gallium/winsys glu egl
PROGRAM_DIRS = egl
DRIVER_DIRS = dri
WINDOW_SYSTEM = dri
GALLIUM_WINSYS_DIRS = egl_drm
# gamma are missing because they have not been converted to use the new
# interface.
DRI_DIRS = intel
+7 -1
View File
@@ -46,7 +46,7 @@ demo3.o: demo3.c $(HEADERS)
eglinfo: eglinfo.o $(TOP)/$(LIB_DIR)/libEGL.so
$(CC) $(CFLAGS) eglinfo.o -L$(TOP)/$(LIB_DIR) -lEGL $(LIBDRM_LIB) -o $@
$(CC) $(CFLAGS) eglinfo.o -L$(TOP)/$(LIB_DIR) -lGL -lEGL $(LIBDRM_LIB) -o $@
eglinfo.o: eglinfo.c $(HEADERS)
$(CC) -c $(CFLAGS) -I$(TOP)/include eglinfo.c
@@ -63,3 +63,9 @@ clean:
rm -f *.o *~
rm -f *.so
rm -f $(PROGRAMS)
run:
LD_LIBRARY_PATH=$(TOP)/lib ./eglgears
debug:
LD_LIBRARY_PATH=$(TOP)/lib gdb ./eglgears
+1 -1
View File
@@ -102,7 +102,7 @@ main(int argc, char *argv[])
/*
EGLDisplay d = eglGetDisplay(EGL_DEFAULT_DISPLAY);
*/
EGLDisplay d = eglGetDisplay("!fb_dri");
EGLDisplay d = eglGetDisplay("!EGL_i915");
assert(d);
if (!eglInitialize(d, &maj, &min)) {
+1 -1
View File
@@ -102,7 +102,7 @@ main(int argc, char *argv[])
/*
EGLDisplay d = eglGetDisplay(EGL_DEFAULT_DISPLAY);
*/
EGLDisplay d = eglGetDisplay("!fb_dri");
EGLDisplay d = eglGetDisplay("!EGL_i915");
assert(d);
if (!eglInitialize(d, &maj, &min)) {
+1 -1
View File
@@ -576,7 +576,7 @@ main(int argc, char *argv[])
/*
EGLDisplay d = eglGetDisplay(EGL_DEFAULT_DISPLAY);
*/
EGLDisplay d = eglGetDisplay(":0");
EGLDisplay d = eglGetDisplay("!EGL_i915");
assert(d);
if (!eglInitialize(d, &maj, &min)) {
+1 -1
View File
@@ -385,7 +385,7 @@ main(int argc, char *argv[])
}
/* DBR : Create EGL context/surface etc */
d = eglGetDisplay(":0");
d = eglGetDisplay("!EGL_i915");
assert(d);
if (!eglInitialize(d, &maj, &min)) {
+1 -1
View File
@@ -140,7 +140,7 @@ main(int argc, char *argv[])
{
int maj, min;
/*EGLDisplay d = eglGetDisplay(EGL_DEFAULT_DISPLAY);*/
EGLDisplay d = eglGetDisplay(":0");
EGLDisplay d = eglGetDisplay("!EGL_i915");
if (!eglInitialize(d, &maj, &min)) {
printf("eglinfo: eglInitialize failed\n");
+3
View File
@@ -204,7 +204,10 @@ mem_dup(const void *src, uint size)
#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
/* JB stop warnings */
#ifndef Elements
#define Elements(x) (sizeof(x)/sizeof((x)[0]))
#endif
#define Offset(TYPE, MEMBER) ((unsigned)&(((TYPE *)NULL)->MEMBER))
/**
+6 -3
View File
@@ -2,7 +2,7 @@
TOP = ../../../../..
include $(TOP)/configs/current
LIBNAME = i915tex_dri.so
LIBNAME = i915_dri.so
MINIGLX_SOURCES = server/intel_dri.c
@@ -19,11 +19,14 @@ DRIVER_SOURCES = \
intel_context.c \
intel_lock.c \
intel_screen.c \
intel_batchpool.c
ws_dri_bufmgr.c \
ws_dri_drmpool.c \
ws_dri_fencemgr.c \
ws_dri_mallocpool.c \
ws_dri_slabpool.c
C_SOURCES = \
$(COMMON_GALLIUM_SOURCES) \
$(COMMON_BM_SOURCES) \
$(DRIVER_SOURCES)
ASM_SOURCES =
+295 -193
View File
@@ -25,108 +25,95 @@
*
**************************************************************************/
#include <errno.h>
#include <stdio.h>
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_screen.h"
#include "intel_reg.h"
#include "drm.h"
/* Relocations in kernel space:
* - pass dma buffer seperately
* - memory manager knows how to patch
* - pass list of dependent buffers
* - pass relocation list
*
* Either:
* - get back an offset for buffer to fire
* - memory manager knows how to fire buffer
*
* Really want the buffer to be AGP and pinned.
*
*/
/* Cliprect fence: The highest fence protecting a dma buffer
* containing explicit cliprect information. Like the old drawable
* lock but irq-driven. X server must wait for this fence to expire
* before changing cliprects [and then doing sw rendering?]. For
* other dma buffers, the scheduler will grab current cliprect info
* and mix into buffer. X server must hold the lock while changing
* cliprects??? Make per-drawable. Need cliprects in shared memory
* -- beats storing them with every cmd buffer in the queue.
*
* ==> X server must wait for this fence to expire before touching the
* framebuffer with new cliprects.
*
* ==> Cliprect-dependent buffers associated with a
* cliprect-timestamp. All of the buffers associated with a timestamp
* must go to hardware before any buffer with a newer timestamp.
*
* ==> Dma should be queued per-drawable for correct X/GL
* synchronization. Or can fences be used for this?
*
* Applies to: Blit operations, metaops, X server operations -- X
* server automatically waits on its own dma to complete before
* modifying cliprects ???
*/
#include <errno.h>
#if 0
static void
intel_dump_batchbuffer(uint offset, uint * ptr, uint count)
intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
{
int i;
printf("\n\n\nSTART BATCH (%d dwords):\n", count / 4);
for (i = 0; i < count / 4; i += 1)
printf("\t0x%08x\n", ptr[i]);
printf("END BATCH\n\n\n");
fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
for (i = 0; i < count / 4; i += 4)
fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
fprintf(stderr, "END BATCH\n\n\n");
}
#endif
static void
intel_realloc_relocs(struct intel_batchbuffer *batch, int num_relocs)
{
unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
size *= sizeof(uint32_t);
batch->reloc = realloc(batch->reloc, size);
batch->reloc_size = num_relocs;
}
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
int i;
if (batch->map) {
driBOUnmap(batch->buffer);
batch->map = NULL;
}
/*
* Get a new, free batchbuffer.
*/
batch->size = BATCH_SZ;
driBOData(batch->buffer, batch->size, NULL, 0);
drmBO *bo;
struct drm_bo_info_req *req;
driBOUnrefUserList(batch->list);
driBOResetList(batch->list);
driBOResetList(&batch->list);
batch->size = 4 * 4096; // ZZZ JB batch->intel->intelScreen->maxBatchSize;
driBOData(batch->buffer, batch->size, NULL, NULL, 0);
/*
* Unreference buffers previously on the relocation list.
* Add the batchbuffer to the validate list.
*/
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
driBOUnReference(r->buf);
}
batch->list_count = 0;
batch->nr_relocs = 0;
batch->flags = 0;
driBOAddListItem(batch->list, batch->buffer,
DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
&batch->dest_location, &batch->node);
req = &batch->node->bo_arg.d.req.bo_req;
/*
* We don't refcount the batchbuffer itself since we can't destroy it
* while it's on the list.
* Set up information needed for us to make relocations
* relative to the underlying drm buffer objects.
*/
driBOAddListItem(&batch->list, batch->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);
driReadLockKernelBO();
bo = driBOKernel(batch->buffer);
req->presumed_offset = (uint64_t) bo->offset;
req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
batch->drmBOVirtual = (uint8_t *) bo->virtual;
driReadUnlockKernelBO();
/*
* Adjust the relocation buffer size.
*/
if (batch->reloc_size > INTEL_MAX_RELOCS ||
batch->reloc == NULL)
intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);
assert(batch->reloc != NULL);
batch->reloc[0] = 0; /* No relocs yet. */
batch->reloc[1] = 1; /* Reloc type 1 */
batch->reloc[2] = 0; /* Only a single relocation list. */
batch->reloc[3] = 0; /* Only a single relocation list. */
batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
batch->poolOffset = driBOPoolOffset(batch->buffer);
batch->ptr = batch->map;
batch->dirty_state = ~0;
batch->nr_relocs = 0;
batch->flags = 0;
batch->id = 0;//batch->intel->intelScreen->batch_id++;
}
/*======================================================================
* Public functions
*/
@@ -141,121 +128,253 @@ intel_batchbuffer_alloc(struct intel_context *intel)
&batch->buffer, 4096,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
batch->last_fence = NULL;
driBOCreateList(20, &batch->list);
batch->list = driBOCreateList(20);
batch->reloc = NULL;
intel_batchbuffer_reset(batch);
return batch;
}
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
if (batch->last_fence) {
driFenceFinish(batch->last_fence,
DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
GL_FALSE);
driFenceUnReference(batch->last_fence);
batch->last_fence = NULL;
DRM_FENCE_TYPE_EXE, GL_FALSE);
driFenceUnReference(&batch->last_fence);
}
if (batch->map) {
driBOUnmap(batch->buffer);
batch->map = NULL;
}
driBOUnReference(batch->buffer);
driBOFreeList(batch->list);
if (batch->reloc)
free(batch->reloc);
batch->buffer = NULL;
free(batch);
}
static void
intel_batch_ioctl(struct intel_context *intel,
uint start_offset, uint used, boolean allow_unlock)
void
intel_offset_relocation(struct intel_batchbuffer *batch,
unsigned pre_add,
struct _DriBufferObject *driBO,
uint64_t val_flags,
uint64_t val_mask)
{
drmI830BatchBuffer batch;
int itemLoc;
struct _drmBONode *node;
uint32_t *reloc;
struct drm_bo_info_req *req;
driBOAddListItem(batch->list, driBO, val_flags, val_mask,
&itemLoc, &node);
req = &node->bo_arg.d.req.bo_req;
batch.start = start_offset;
batch.used = used;
batch.cliprects = NULL; /* unused */
batch.num_cliprects = 0;
batch.DR1 = 0;
batch.DR4 = 0; /* still need this ? */
if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {
DBG(IOCTL, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
__FUNCTION__,
batch.start,
batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);
/*
* Stop other threads from tampering with the underlying
* drmBO while we're reading its offset.
*/
if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
sizeof(batch))) {
printf("DRM_I830_BATCHBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
exit(1);
}
driReadLockKernelBO();
req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
driReadUnlockKernelBO();
req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
}
pre_add += driBOPoolOffset(driBO);
if (batch->nr_relocs == batch->reloc_size)
intel_realloc_relocs(batch, batch->reloc_size * 2);
reloc = batch->reloc +
(I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);
reloc[0] = ((uint8_t *)batch->ptr - batch->drmBOVirtual);
intel_batchbuffer_emit_dword(batch, req->presumed_offset + pre_add);
reloc[1] = pre_add;
reloc[2] = itemLoc;
reloc[3] = batch->dest_location;
batch->nr_relocs++;
}
static void
i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
{
buf->handle = rep->handle;
buf->flags = rep->flags;
buf->size = rep->size;
buf->offset = rep->offset;
buf->mapHandle = rep->arg_handle;
buf->proposedFlags = rep->proposed_flags;
buf->start = rep->buffer_start;
buf->fenceFlags = rep->fence_flags;
buf->replyFlags = rep->rep_flags;
buf->pageAlignment = rep->page_alignment;
}
static int
i915_execbuf(struct intel_batchbuffer *batch,
GLuint used,
GLboolean ignore_cliprects,
drmBOList *list,
struct drm_i915_execbuffer *ea)
{
struct intel_context *intel = batch->intel;
drmBONode *node;
drmMMListHead *l;
struct drm_i915_op_arg *arg, *first;
struct drm_bo_op_req *req;
struct drm_bo_info_rep *rep;
uint64_t *prevNext = NULL;
drmBO *buf;
int ret = 0;
uint32_t count = 0;
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
req = &arg->d.req;
if (!first)
first = arg;
if (prevNext)
*prevNext = (unsigned long)arg;
prevNext = &arg->next;
req->bo_req.handle = node->buf->handle;
req->op = drm_bo_validate;
req->bo_req.flags = node->arg0;
req->bo_req.mask = node->arg1;
req->bo_req.hint |= 0;
count++;
}
memset(ea, 0, sizeof(*ea));
ea->num_buffers = count;
ea->batch.start = batch->poolOffset;
ea->batch.used = used;
#if 0 /* ZZZ JB: no cliprects used */
ea->batch.cliprects = intel->pClipRects;
ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
ea->batch.DR1 = 0;
ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
(((GLuint) intel->drawY) << 16));
#else
ea->batch.cliprects = NULL;
ea->batch.num_cliprects = 0;
ea->batch.DR1 = 0;
ea->batch.DR4 = 0;
#endif
ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
ea->ops_list = (unsigned long) first;
first->reloc_ptr = (unsigned long) batch->reloc;
batch->reloc[0] = batch->nr_relocs;
//return -EFAULT;
do {
ret = drmCommandWriteRead(intel->driFd, DRM_I915_EXECBUFFER, ea,
sizeof(*ea));
} while (ret == -EAGAIN);
if (ret != 0)
return ret;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
rep = &arg->d.rep.bo_info;
if (!arg->handled) {
return -EFAULT;
}
if (arg->d.rep.ret)
return arg->d.rep.ret;
buf = node->buf;
i915_drm_copy_reply(rep, buf);
}
return 0;
}
/* TODO: Push this whole function into bufmgr.
*/
static void
static struct _DriFenceObject *
do_flush_locked(struct intel_batchbuffer *batch,
uint used, boolean allow_unlock)
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock)
{
uint *ptr;
uint i, fenceFlags;
struct intel_context *intel = batch->intel;
struct _DriFenceObject *fo;
drmFence fence;
drmBOList *boList;
struct drm_i915_execbuffer ea;
int ret = 0;
driBOValidateList(batch->intel->driFd, &batch->list);
driBOValidateUserList(batch->list);
boList = driGetdrmBOList(batch->list);
/* Apply the relocations. This nasty map indicates to me that the
* whole task should be done internally by the memory manager, and
* that dma buffers probably need to be pinned within agp space.
*/
ptr = (uint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
DRM_BO_HINT_ALLOW_UNFENCED_MAP);
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
#if 0 /* ZZZ JB Allways run */
if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
#else
if (1) {
#endif
ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
} else {
driPutdrmBOList(batch->list);
fo = NULL;
goto out;
}
driPutdrmBOList(batch->list);
if (ret)
abort();
if (0)
intel_dump_batchbuffer(0, ptr, used);
if (ea.fence_arg.error != 0) {
driBOUnmap(batch->buffer);
batch->map = NULL;
intel_batch_ioctl(batch->intel,
driBOOffset(batch->buffer),
used, allow_unlock);
/*
* Kernel fencing. The flags tells the kernel that we've
* programmed an MI_FLUSH.
*/
fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
fo = driFenceBuffers(batch->intel->driFd, "Batch fence", fenceFlags);
/*
* User space fencing.
*/
driBOFence(batch->buffer, fo);
if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
/*
* Oops. We only validated a batch buffer. This means we
* didn't do any proper rendering. Discard this fence object.
* The hardware has been idled by the kernel.
* Don't fence the driBOs.
*/
driFenceUnReference(fo);
}
else {
driFenceUnReference(batch->last_fence);
batch->last_fence = fo;
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
driBOFence(r->buf, fo);
}
if (batch->last_fence)
driFenceUnReference(&batch->last_fence);
#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
_mesa_printf("fence error\n");
#endif
batch->last_fence = NULL;
fo = NULL;
goto out;
}
fence.handle = ea.fence_arg.handle;
fence.fence_class = ea.fence_arg.fence_class;
fence.type = ea.fence_arg.type;
fence.flags = ea.fence_arg.flags;
fence.signaled = ea.fence_arg.signaled;
fo = driBOFenceUserList(batch->intel->intelScreen->mgr, batch->list,
"SuperFence", &fence);
if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
if (batch->last_fence)
driFenceUnReference(&batch->last_fence);
/*
* FIXME: Context last fence??
*/
batch->last_fence = fo;
driFenceReference(fo);
}
out:
#if 0 /* ZZZ JB: fix this */
intel->vtbl.lost_hardware(intel);
#else
(void)intel;
#endif
return fo;
}
@@ -263,29 +382,43 @@ struct _DriFenceObject *
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
struct intel_context *intel = batch->intel;
uint used = batch->ptr - batch->map;
const boolean was_locked = intel->locked;
GLuint used = batch->ptr - batch->map;
GLboolean was_locked = intel->locked;
struct _DriFenceObject *fence;
if (used == 0)
if (used == 0) {
driFenceReference(batch->last_fence);
return batch->last_fence;
#define MI_FLUSH ((0 << 29) | (4 << 23))
}
/* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
* performance drain that we would like to avoid.
*/
#if 0 /* ZZZ JB: what should we do here? */
if (used & 4) {
((int *) batch->ptr)[0] = MI_FLUSH;
((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *) batch->ptr)[1] = 0;
((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
used += 12;
}
else {
((int *) batch->ptr)[0] = MI_FLUSH;
((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
used += 8;
}
#else
if (used & 4) {
((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
((int *) batch->ptr)[1] = 0;
((int *) batch->ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
used += 12;
}
else {
((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
((int *) batch->ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
used += 8;
}
#endif
driBOUnmap(batch->buffer);
batch->ptr = NULL;
batch->map = NULL;
@@ -296,7 +429,8 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
if (!was_locked)
LOCK_HARDWARE(intel);
do_flush_locked(batch, used, GL_FALSE);
fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
GL_FALSE);
if (!was_locked)
UNLOCK_HARDWARE(intel);
@@ -304,52 +438,20 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
/* Reset the buffer:
*/
intel_batchbuffer_reset(batch);
return batch->last_fence;
return fence;
}
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
if (fence) {
driFenceReference(fence);
driFenceFinish(fence,
DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
GL_FALSE);
driFenceUnReference(fence);
}
driFenceFinish(fence, driFenceType(fence), GL_FALSE);
driFenceUnReference(&fence);
}
/* This is the only way buffers get added to the validate list.
*/
boolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
struct _DriBufferObject *buffer,
uint flags, uint mask, uint delta)
{
assert(batch->nr_relocs < MAX_RELOCS);
driBOAddListItem(&batch->list, buffer, flags, mask);
{
struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
driBOReference(buffer);
r->buf = buffer;
r->offset = batch->ptr - batch->map;
r->delta = delta;
*(uint *) batch->ptr = 0x12345678;
}
batch->ptr += 4;
return GL_TRUE;
}
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, uint bytes, uint flags)
const void *data, GLuint bytes, GLuint flags)
{
assert((bytes & 3) == 0);
intel_batchbuffer_require_space(batch, bytes, flags);
@@ -1,54 +1,20 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
#include "pipe/p_debug.h"
#include "pipe/p_compiler.h"
#include "dri_bufmgr.h"
#include "mtypes.h"
#include "ws_dri_bufmgr.h"
struct intel_context;
#define BATCH_SZ 16384
#define BATCH_RESERVED 16
#define MAX_RELOCS 4096
#define INTEL_DEFAULT_RELOCS 100
#define INTEL_MAX_RELOCS 400
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
struct buffer_reloc
{
struct _DriBufferObject *buf;
uint offset;
uint delta; /* not needed? */
};
struct intel_batchbuffer
{
struct bufmgr *bm;
@@ -56,19 +22,30 @@ struct intel_batchbuffer
struct _DriBufferObject *buffer;
struct _DriFenceObject *last_fence;
uint flags;
GLuint flags;
drmBOList list;
uint list_count;
ubyte *map;
ubyte *ptr;
struct _DriBufferList *list;
GLuint list_count;
GLubyte *map;
GLubyte *ptr;
struct buffer_reloc reloc[MAX_RELOCS];
uint nr_relocs;
uint size;
uint32_t *reloc;
GLuint reloc_size;
GLuint nr_relocs;
GLuint size;
GLuint dirty_state;
GLuint id;
uint32_t poolOffset;
uint8_t *drmBOVirtual;
struct _drmBONode *node; /* Validation list node for this buffer */
int dest_location; /* Validation list sequence for this buffer */
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context *intel);
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
*intel);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
@@ -82,26 +59,28 @@ void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
/* Unlike bmBufferData, this currently requires the buffer be mapped.
* Consider it a convenience function wrapping multiple
* Consider it a convenience function wrapping multple
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, uint bytes, uint flags);
const void *data, GLuint bytes, GLuint flags);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
uint bytes);
GLuint bytes);
boolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
struct _DriBufferObject *buffer,
uint flags,
uint mask, uint offset);
void
intel_offset_relocation(struct intel_batchbuffer *batch,
unsigned pre_add,
struct _DriBufferObject *driBO,
uint64_t val_flags,
uint64_t val_mask);
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
static INLINE uint
static INLINE GLuint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
@@ -109,22 +88,26 @@ intel_batchbuffer_space(struct intel_batchbuffer *batch)
static INLINE void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, uint dword)
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
assert(batch->map);
assert(intel_batchbuffer_space(batch) >= 4);
*(uint *) (batch->ptr) = dword;
*(GLuint *) (batch->ptr) = dword;
batch->ptr += 4;
}
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
uint sz, uint flags)
GLuint sz, GLuint flags)
{
struct _DriFenceObject *fence;
assert(sz < batch->size - 8);
if (intel_batchbuffer_space(batch) < sz ||
(batch->flags != 0 && flags != 0 && batch->flags != flags))
intel_batchbuffer_flush(batch);
(batch->flags != 0 && flags != 0 && batch->flags != flags)) {
fence = intel_batchbuffer_flush(batch);
driFenceUnReference(&fence);
}
batch->flags |= flags;
}
@@ -134,14 +117,15 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
#define BATCH_LOCALS
#define BEGIN_BATCH(n, flags) do { \
assert(!intel->prim.flush); \
intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
#define OUT_RELOC(buf,flags,mask,delta) do { \
assert((delta) >= 0); \
intel_batchbuffer_emit_reloc(intel->batch, buf, flags, mask, delta); \
assert((delta) >= 0); \
intel_offset_relocation(intel->batch, delta, buf, flags, mask); \
} while (0)
#define ADVANCE_BATCH() do { } while(0)
@@ -1,427 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
/**
* XXX NOTE: there are no intel dependencies in this file.
* Rename to dri_batchpool.c?
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include "pipe/p_compiler.h"
#include "pipe/p_thread.h"
#include "dri_bufpool.h"
#include "dri_bufmgr.h"
#include "intel_batchpool.h"
typedef struct
{
drmMMListHead head;
struct _BPool *parent;
struct _DriFenceObject *fence;
unsigned long start;
int unfenced;
int mapped;
} BBuf;
typedef struct _BPool
{
_glthread_Mutex mutex;
unsigned long bufSize;
unsigned poolSize;
unsigned numFree;
unsigned numTot;
unsigned numDelayed;
unsigned checkDelayed;
drmMMListHead free;
drmMMListHead delayed;
drmMMListHead head;
drmBO kernelBO;
void *virtual;
BBuf *bufs;
} BPool;
static BPool *
createBPool(int fd, unsigned long bufSize, unsigned numBufs, unsigned flags,
unsigned checkDelayed)
{
BPool *p = (BPool *) malloc(sizeof(*p));
BBuf *buf;
int i;
if (!p)
return NULL;
p->bufs = (BBuf *) malloc(numBufs * sizeof(*p->bufs));
if (!p->bufs) {
free(p);
return NULL;
}
DRMINITLISTHEAD(&p->free);
DRMINITLISTHEAD(&p->head);
DRMINITLISTHEAD(&p->delayed);
p->numTot = numBufs;
p->numFree = numBufs;
p->bufSize = bufSize;
p->numDelayed = 0;
p->checkDelayed = checkDelayed;
_glthread_INIT_MUTEX(p->mutex);
if (drmBOCreate(fd, 0, numBufs * bufSize, 0, NULL, drm_bo_type_dc,
flags, DRM_BO_HINT_DONT_FENCE, &p->kernelBO)) {
free(p->bufs);
free(p);
return NULL;
}
if (drmBOMap(fd, &p->kernelBO, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0,
&p->virtual)) {
drmBODestroy(fd, &p->kernelBO);
free(p->bufs);
free(p);
return NULL;
}
/*
* We unmap the buffer so that we can validate it later. Note that this is
* just a synchronizing operation. The buffer will have a virtual mapping
* until it is destroyed.
*/
drmBOUnmap(fd, &p->kernelBO);
buf = p->bufs;
for (i = 0; i < numBufs; ++i) {
buf->parent = p;
buf->fence = NULL;
buf->start = i * bufSize;
buf->mapped = 0;
buf->unfenced = 0;
DRMLISTADDTAIL(&buf->head, &p->free);
buf++;
}
return p;
}
static void
pool_checkFree(BPool * p, int wait)
{
drmMMListHead *list, *prev;
BBuf *buf;
int signaled = 0;
int i;
list = p->delayed.next;
if (p->numDelayed > 3) {
for (i = 0; i < p->numDelayed; i += 3) {
list = list->next;
}
}
prev = list->prev;
for (; list != &p->delayed; list = prev, prev = list->prev) {
buf = DRMLISTENTRY(BBuf, list, head);
if (!signaled) {
if (wait) {
driFenceFinish(buf->fence, DRM_FENCE_TYPE_EXE, 1);
signaled = 1;
}
else {
signaled = driFenceSignaled(buf->fence, DRM_FENCE_TYPE_EXE);
}
}
if (!signaled)
break;
driFenceUnReference(buf->fence);
buf->fence = NULL;
DRMLISTDEL(list);
p->numDelayed--;
DRMLISTADD(list, &p->free);
p->numFree++;
}
}
static void *
pool_create(struct _DriBufferPool *pool,
unsigned long size, unsigned flags, unsigned hint,
unsigned alignment)
{
BPool *p = (BPool *) pool->data;
drmMMListHead *item;
if (alignment && (alignment != 4096))
return NULL;
_glthread_LOCK_MUTEX(p->mutex);
if (p->numFree == 0)
pool_checkFree(p, TRUE);
if (p->numFree == 0) {
fprintf(stderr, "Out of fixed size buffer objects\n");
BM_CKFATAL(-ENOMEM);
}
item = p->free.next;
if (item == &p->free) {
fprintf(stderr, "Fixed size buffer pool corruption\n");
}
DRMLISTDEL(item);
--p->numFree;
_glthread_UNLOCK_MUTEX(p->mutex);
return (void *) DRMLISTENTRY(BBuf, item, head);
}
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
_glthread_LOCK_MUTEX(p->mutex);
if (buf->fence) {
DRMLISTADDTAIL(&buf->head, &p->delayed);
p->numDelayed++;
}
else {
buf->unfenced = 0;
DRMLISTADD(&buf->head, &p->free);
p->numFree++;
}
if ((p->numDelayed % p->checkDelayed) == 0)
pool_checkFree(p, 0);
_glthread_UNLOCK_MUTEX(p->mutex);
return 0;
}
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, void **virtual)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
_glthread_LOCK_MUTEX(p->mutex);
/*
* Currently Mesa doesn't have any condition variables to resolve this
* cleanly in a multithreading environment.
* We bail out instead.
*/
if (buf->mapped) {
fprintf(stderr, "Trying to map already mapped buffer object\n");
BM_CKFATAL(-EINVAL);
}
#if 0
if (buf->unfenced && !(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
fprintf(stderr, "Trying to map an unfenced buffer object 0x%08x"
" 0x%08x %d\n", hint, flags, buf->start);
BM_CKFATAL(-EINVAL);
}
#endif
if (buf->fence) {
_glthread_UNLOCK_MUTEX(p->mutex);
return -EBUSY;
}
buf->mapped = TRUE;
*virtual = (unsigned char *) p->virtual + buf->start;
_glthread_UNLOCK_MUTEX(p->mutex);
return 0;
}
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
{
BBuf *buf = (BBuf *) private;
driFenceFinish(buf->fence, 0x0, lazy);
return 0;
}
/*
 * Unmap a pool buffer.  The mapping state is just a flag on the buffer;
 * the underlying kernel BO stays mapped for the pool's lifetime.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   ((BBuf *) private)->mapped = 0;
   return 0;
}
/*
 * GPU offset of a pool buffer: the shared kernel BO's base offset plus
 * this buffer's start within the pool.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   BBuf *b = (BBuf *) private;

   return b->parent->kernelBO.offset + b->start;
}
/*
 * Every buffer in the pool shares the kernel BO's placement flags.
 */
static unsigned
pool_flags(struct _DriBufferPool *pool, void *private)
{
   return ((BPool *) pool->data)->kernelBO.flags;
}
/*
 * Fixed-size pool: every buffer has the same size.
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   return ((BPool *) pool->data)->bufSize;
}
/*
 * Attach a fence object to a pool buffer, replacing any previous fence
 * and clearing the unfenced flag.
 *
 * Fix: the new fence is now referenced BEFORE the old one is released.
 * The old order (unreference, assign, reference) could drop the fence's
 * refcount to zero — destroying it — when the buffer was re-fenced with
 * the very fence it already held.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;
   struct _DriFenceObject *old;

   _glthread_LOCK_MUTEX(p->mutex);
   old = buf->fence;
   driFenceReference(fence);
   buf->fence = fence;
   buf->unfenced = 0;
   if (old) {
      driFenceUnReference(old);
   }
   _glthread_UNLOCK_MUTEX(p->mutex);
   return 0;
}
/*
 * All pool buffers are sub-allocations of one kernel BO; return it.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   BBuf *b = (BBuf *) private;

   return &b->parent->kernelBO;
}
/*
 * Mark the buffer as submitted-but-not-yet-fenced.  The flag is cleared
 * again when a fence is attached.
 */
static int
pool_validate(struct _DriBufferPool *pool, void *private)
{
   BBuf *b = (BBuf *) private;
   BPool *bp = b->parent;

   _glthread_LOCK_MUTEX(bp->mutex);
   b->unfenced = TRUE;
   _glthread_UNLOCK_MUTEX(bp->mutex);
   return 0;
}
/*
 * Destroy the pool.
 *
 * Spins (yielding the CPU) until all fenced buffers have been reclaimed,
 * then destroys the shared kernel BO and frees all bookkeeping.
 * NOTE(review): p->mutex is still held while the kernel BO and buffer
 * array are torn down, and p itself is freed right after the unlock —
 * assumes no other thread can still reach this pool at takedown time.
 */
static void
pool_takedown(struct _DriBufferPool *pool)
{
BPool *p = (BPool *) pool->data;
/*
* Wait on outstanding fences.
*/
_glthread_LOCK_MUTEX(p->mutex);
while ((p->numFree < p->numTot) && p->numDelayed) {
/* Drop the lock while yielding so pool_checkFree can make progress. */
_glthread_UNLOCK_MUTEX(p->mutex);
sched_yield();
pool_checkFree(p, TRUE);
_glthread_LOCK_MUTEX(p->mutex);
}
drmBODestroy(pool->fd, &p->kernelBO);
free(p->bufs);
_glthread_UNLOCK_MUTEX(p->mutex);
free(p);
free(pool);
}
/*
 * Create a batch-buffer pool: numBufs fixed-size buffers of bufSize
 * bytes, carved out of one kernel BO.
 *
 * Returns NULL on allocation failure.
 * Fix: the pool struct is no longer leaked when createBPool() fails.
 */
struct _DriBufferPool *
driBatchPoolInit(int fd, unsigned flags,
                 unsigned long bufSize,
                 unsigned numBufs, unsigned checkDelayed)
{
   struct _DriBufferPool *pool;

   pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
   if (!pool)
      return NULL;

   pool->data = createBPool(fd, bufSize, numBufs, flags, checkDelayed);
   if (!pool->data) {
      free(pool);               /* don't leak the pool struct */
      return NULL;
   }

   pool->fd = fd;
   pool->map = &pool_map;
   pool->unmap = &pool_unmap;
   pool->destroy = &pool_destroy;
   pool->offset = &pool_offset;
   pool->flags = &pool_flags;
   pool->size = &pool_size;
   pool->create = &pool_create;
   pool->fence = &pool_fence;
   pool->kernel = &pool_kernel;
   pool->validate = &pool_validate;
   pool->waitIdle = &pool_waitIdle;
   pool->setstatic = NULL;
   pool->takeDown = &pool_takedown;
   return pool;
}
+3 -2
View File
@@ -162,6 +162,7 @@ intelCreateContext(const __GLcontextModes * visual,
* memory pools
*/
DRM_LIGHT_LOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
// ZZZ JB should be per screen and not be done per context
havePools = intelCreatePools(sPriv);
DRM_UNLOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
if (!havePools)
@@ -234,12 +235,12 @@ intelDestroyContext(__DRIcontextPrivate * driContextPriv)
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
driFenceUnReference(intel->last_swap_fence);
driFenceUnReference(&intel->last_swap_fence);
intel->last_swap_fence = NULL;
}
if (intel->first_swap_fence) {
driFenceFinish(intel->first_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
driFenceUnReference(intel->first_swap_fence);
driFenceUnReference(&intel->first_swap_fence);
intel->first_swap_fence = NULL;
}
+1 -1
View File
@@ -28,7 +28,7 @@
#ifndef INTEL_CONTEXT_H
#define INTEL_CONTEXT_H
#include <stdint.h>
#include "drm.h"
#include "pipe/p_debug.h"
+47 -6
View File
@@ -32,12 +32,12 @@
#include "intel_context.h"
#include "intel_screen.h"
#include "intel_batchbuffer.h"
#include "intel_batchpool.h"
//#include "intel_batchpool.h"
#include "intel_swapbuffers.h"
#include "intel_winsys.h"
#include "i830_dri.h"
#include "dri_bufpool.h"
#include "ws_dri_bufpool.h"
#include "pipe/p_context.h"
#include "state_tracker/st_public.h"
@@ -132,6 +132,7 @@ intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
assert( sarea->front_size >=
intelScreen->front.pitch * intelScreen->front.height );
#if 0 /* JB not important */
if (!sarea->front_handle)
return;
@@ -142,30 +143,41 @@ intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
fprintf(stderr, "drmMap(frontbuffer) failed!\n");
return;
}
#endif
#if 0 /* JB */
if (intelScreen->staticPool) {
driGenBuffers(intelScreen->staticPool, "static region", 1,
&intelScreen->front.buffer, 64,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE |
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
driBOSetStatic(intelScreen->front.buffer,
intelScreen->front.offset,
intelScreen->front.pitch * intelScreen->front.height,
intelScreen->front.map, 0);
}
#else
if (intelScreen->staticPool) {
if (intelScreen->front.buffer)
driBOUnReference(intelScreen->front.buffer);
driGenBuffers(intelScreen->staticPool, "front", 1, &intelScreen->front.buffer, 0, 0, 0);
driBOSetReferenced(intelScreen->front.buffer, sarea->front_bo_handle);
}
#endif
}
boolean
intelCreatePools(__DRIscreenPrivate * sPriv)
{
unsigned batchPoolSize = 1024*1024;
//unsigned batchPoolSize = 1024*1024;
struct intel_screen *intelScreen = intel_screen(sPriv);
if (intelScreen->havePools)
return GL_TRUE;
#if 0 /* ZZZ JB fix this */
intelScreen->staticPool = driDRMStaticPoolInit(sPriv->fd);
if (!intelScreen->staticPool)
return GL_FALSE;
@@ -181,7 +193,17 @@ intelCreatePools(__DRIscreenPrivate * sPriv)
fprintf(stderr, "Failed to initialize batch pool - possible incorrect agpgart installed\n");
return GL_FALSE;
}
#else
intelScreen->staticPool = driDRMPoolInit(sPriv->fd);
intelScreen->batchPool = driSlabPoolInit(sPriv->fd,
DRM_BO_FLAG_EXE |
DRM_BO_FLAG_MEM_TT,
DRM_BO_FLAG_EXE |
DRM_BO_FLAG_MEM_TT,
4 * 4096, //intelScreen->maxBatchSize,
1, 40, 16*16384, 0,
intelScreen->fMan);
#endif
intelScreen->havePools = GL_TRUE;
intelUpdateScreenRotation(sPriv, intelScreen->sarea);
@@ -240,7 +262,26 @@ intelInitDriver(__DRIscreenPrivate * sPriv)
(*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
}
intelScreen->winsys = intel_create_pipe_winsys(sPriv->fd);
#if 1 // ZZZ JB
intelScreen->mgr = driFenceMgrTTMInit(sPriv->fd);
if (!intelScreen->mgr) {
fprintf(stderr, "Failed to create fence manager.\n");
return GL_FALSE;
}
intelScreen->fMan = driInitFreeSlabManager(10, 10);
if (!intelScreen->fMan) {
fprintf(stderr, "Failed to create free slab manager.\n");
return GL_FALSE;
}
if (!intelCreatePools(sPriv))
return GL_FALSE;
#endif
intelScreen->winsys = intel_create_pipe_winsys(sPriv->fd, intelScreen->fMan);
return GL_TRUE;
}
+8 -1
View File
@@ -31,7 +31,7 @@
#include "dri_util.h"
#include "i830_common.h"
#include "xmlconfig.h"
#include "dri_bufpool.h"
#include "ws_dri_bufpool.h"
#include "pipe/p_compiler.h"
@@ -74,6 +74,13 @@ struct intel_screen
*/
struct intel_context *dummyContext;
/*
* New stuff form the i915tex integration
*/
struct _DriFenceMgr *mgr;
struct _DriFreeSlabManager *fMan;
unsigned batch_id;
struct pipe_winsys *winsys;
};
@@ -63,7 +63,7 @@ intelDisplaySurface(__DRIdrawablePrivate *dPriv,
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, TRUE);
driFenceUnReference(intel->last_swap_fence);
driFenceUnReference(&intel->last_swap_fence);
intel->last_swap_fence = NULL;
}
intel->last_swap_fence = intel->first_swap_fence;
@@ -178,9 +178,8 @@ intelDisplaySurface(__DRIdrawablePrivate *dPriv,
}
if (intel->first_swap_fence)
driFenceUnReference(intel->first_swap_fence);
driFenceUnReference(&intel->first_swap_fence);
intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
driFenceReference(intel->first_swap_fence);
}
UNLOCK_HARDWARE(intel);
+2 -2
View File
@@ -37,7 +37,7 @@ struct pipe_buffer;
struct _DriBufferObject;
struct pipe_winsys *
intel_create_pipe_winsys( int fd );
intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan );
void
intel_destroy_pipe_winsys( struct pipe_winsys *winsys );
@@ -53,6 +53,7 @@ intel_create_i915simple( struct intel_context *intel,
struct intel_buffer {
struct pipe_buffer base;
struct _DriBufferPool *pool;
struct _DriBufferObject *driBO;
};
@@ -69,5 +70,4 @@ dri_bo( struct pipe_buffer *buf )
}
#endif
@@ -31,8 +31,8 @@
#include <stdlib.h>
#include <xf86drm.h>
#include "dri_bufpool.h"
#include "dri_bufmgr.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "intel_context.h"
#include "intel_batchbuffer.h"
@@ -106,10 +106,18 @@ static void intel_i915_batch_reloc( struct i915_winsys *sws,
mask |= DRM_BO_FLAG_READ;
}
intel_batchbuffer_emit_reloc( intel->batch,
#if 0 /* JB old */
intel_batchbuffer_emit_reloc( intel->batch,
dri_bo( buf ),
flags, mask,
flags, mask,
delta );
#else /* new */
intel_offset_relocation( intel->batch,
delta,
dri_bo( buf ),
flags,
mask );
#endif
}
@@ -124,12 +132,24 @@ static void intel_i915_batch_flush( struct i915_winsys *sws,
struct pipe_fence_handle *pipe;
} fu;
if (fence)
assert(!*fence);
fu.dri = intel_batchbuffer_flush( intel->batch );
if (fu.dri)
iws->pws->fence_reference(iws->pws, fence, fu.pipe);
if (!fu.dri) {
assert(0);
*fence = NULL;
return;
}
if (fu.dri) {
if (fence)
*fence = fu.pipe;
else
driFenceUnReference(&fu.dri);
}
// if (0) intel_i915_batch_wait_idle( sws );
}
@@ -31,8 +31,8 @@
#include <stdlib.h>
#include <xf86drm.h>
#include "dri_bufpool.h"
#include "dri_bufmgr.h"
//#include "dri_bufpool.h"
//#include "dri_bufmgr.h"
#include "intel_context.h"
#include "intel_winsys.h"
@@ -50,6 +50,8 @@
struct intel_pipe_winsys {
struct pipe_winsys winsys;
struct _DriBufferPool *regionPool;
struct _DriBufferPool *mallocPool;
struct _DriFreeSlabManager *fMan;
};
@@ -92,6 +94,7 @@ intel_buffer_destroy(struct pipe_winsys *winsys,
struct pipe_buffer *buf)
{
driBOUnReference( dri_bo(buf) );
FREE(buf);
}
@@ -108,16 +111,19 @@ intel_buffer_create(struct pipe_winsys *winsys,
struct intel_buffer *buffer = CALLOC_STRUCT( intel_buffer );
struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
unsigned flags = 0;
struct _DriBufferPool *pool;
buffer->base.refcount = 1;
buffer->base.alignment = alignment;
buffer->base.usage = usage;
buffer->base.size = size;
if (usage & (PIPE_BUFFER_USAGE_VERTEX /*| IWS_BUFFER_USAGE_LOCAL*/)) {
if (usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_CONSTANT)) {
flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
pool = iws->mallocPool;
} else {
flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
pool = iws->regionPool;
}
if (usage & PIPE_BUFFER_USAGE_GPU_READ)
@@ -139,10 +145,11 @@ intel_buffer_create(struct pipe_winsys *winsys,
flags |= DRM_BO_FLAG_CACHED;
#endif
driGenBuffers( iws->regionPool,
buffer->pool = pool;
driGenBuffers( buffer->pool,
"pipe buffer", 1, &buffer->driBO, alignment, flags, 0 );
driBOData( buffer->driBO, size, NULL, 0 );
driBOData( buffer->driBO, size, NULL, buffer->pool, 0 );
return &buffer->base;
}
@@ -155,7 +162,9 @@ intel_user_buffer_create(struct pipe_winsys *winsys, void *ptr, unsigned bytes)
struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
driGenUserBuffer( iws->regionPool,
"pipe user buffer", &buffer->driBO, ptr, bytes);
"pipe user buffer", &buffer->driBO, ptr, bytes );
buffer->base.refcount = 1;
return &buffer->base;
}
@@ -209,7 +218,7 @@ intel_i915_surface_alloc_storage(struct pipe_winsys *winsys,
unsigned flags)
{
const unsigned alignment = 64;
int ret;
//int ret;
surf->width = width;
surf->height = height;
@@ -249,9 +258,36 @@ intel_get_name( struct pipe_winsys *winsys )
return "Intel/DRI/ttm";
}
static void
intel_fence_reference( struct pipe_winsys *sws,
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *fence )
{
if (*ptr)
driFenceUnReference((struct _DriFenceObject **)ptr);
if (fence)
*ptr = (struct pipe_fence_handle *)driFenceReference((struct _DriFenceObject *)fence);
}
static int
intel_fence_signalled( struct pipe_winsys *sws,
struct pipe_fence_handle *fence,
unsigned flag )
{
return driFenceSignaled((struct _DriFenceObject *)fence, flag);
}
static int
intel_fence_finish( struct pipe_winsys *sws,
struct pipe_fence_handle *fence,
unsigned flag )
{
return driFenceFinish((struct _DriFenceObject *)fence, flag, 0);
}
struct pipe_winsys *
intel_create_pipe_winsys( int fd )
intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan )
{
struct intel_pipe_winsys *iws = CALLOC_STRUCT( intel_pipe_winsys );
@@ -273,8 +309,14 @@ intel_create_pipe_winsys( int fd )
iws->winsys.surface_alloc_storage = intel_i915_surface_alloc_storage;
iws->winsys.surface_release = intel_i915_surface_release;
iws->winsys.fence_reference = intel_fence_reference;
iws->winsys.fence_signalled = intel_fence_signalled;
iws->winsys.fence_finish = intel_fence_finish;
if (fd)
iws->regionPool = driDRMPoolInit(fd);
iws->regionPool = driDRMPoolInit(fd);
iws->mallocPool = driMallocPoolInit();
return &iws->winsys;
}
@@ -287,6 +329,9 @@ intel_destroy_pipe_winsys( struct pipe_winsys *winsys )
if (iws->regionPool) {
driPoolTakeDown(iws->regionPool);
}
if (iws->mallocPool) {
driPoolTakeDown(iws->mallocPool);
}
free(iws);
}
@@ -54,6 +54,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define DRM_I830_DESTROY_HEAP 0x0c
#define DRM_I830_SET_VBLANK_PIPE 0x0d
#define DRM_I830_GET_VBLANK_PIPE 0x0e
#define DRM_I830_MMIO 0x10
typedef struct {
enum {
@@ -85,6 +86,7 @@ typedef struct {
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
/** Last context that used the buffer manager. */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
@@ -121,20 +123,29 @@ typedef struct {
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
int planeA_x;
int planeA_y;
int planeA_w;
int planeA_h;
int planeB_x;
int planeB_y;
int planeB_w;
int planeB_h;
/* Triple buffering */
drm_handle_t third_handle;
int third_offset;
int third_size;
unsigned int third_tiled;
/* buffer object handles for the static buffers. May change
* over the lifetime of the client, though it doesn't in our current
* implementation.
*/
unsigned int front_bo_handle;
unsigned int back_bo_handle;
unsigned int third_bo_handle;
unsigned int depth_bo_handle;
} drmI830Sarea;
/* Flags for perf_boxes
@@ -223,4 +234,23 @@ typedef struct {
int pipe;
} drmI830VBlankPipe;
#define MMIO_READ 0
#define MMIO_WRITE 1
#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
#define MMIO_REGS_IA_VERTICES_COUNT 1
#define MMIO_REGS_VS_INVOCATION_COUNT 2
#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
#define MMIO_REGS_GS_INVOCATION_COUNT 4
#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
#define MMIO_REGS_CL_INVOCATION_COUNT 6
#define MMIO_REGS_PS_INVOCATION_COUNT 7
#define MMIO_REGS_PS_DEPTH_COUNT 8
typedef struct {
unsigned int read_write:1;
unsigned int reg:31;
void __user *data;
} drmI830MMIO;
#endif /* _I830_DRM_H_ */
@@ -0,0 +1,953 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include "glthread.h"
#include "errno.h"
#include "ws_dri_bufmgr.h"
#include "string.h"
#include "imports.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
/*
* This lock is here to protect drmBO structs changing underneath us during a
* validate list call, since validatelist cannot take individiual locks for
* each drmBO. Validatelist takes this lock in write mode. Any access to an
* individual drmBO should take this lock in read mode, since in that case, the
* driBufferObject mutex will protect the access. Locking order is
* driBufferObject mutex - > this rw lock.
*/
_glthread_DECLARE_STATIC_MUTEX(bmMutex);
_glthread_DECLARE_STATIC_COND(bmCond);
static int kernelReaders = 0;
static int num_buffers = 0;
static int num_user_buffers = 0;
/*
 * Recover the drmBO stored in a list iterator (which is really a
 * pointer to the node's embedded list head).
 */
static drmBO *drmBOListBuf(void *iterator)
{
   drmMMListHead *entry = (drmMMListHead *) iterator;

   return DRMLISTENTRY(drmBONode, entry, head)->buf;
}
/*
 * Start iterating a drmBOList.  Returns NULL for an empty list (the
 * sentinel head points back at itself).
 */
static void *drmBOListIterator(drmBOList *list)
{
   drmMMListHead *first = list->list.next;

   return (first == &list->list) ? NULL : first;
}
/*
 * Advance a drmBOList iterator; NULL once the sentinel head is reached.
 */
static void *drmBOListNext(drmBOList *list, void *iterator)
{
   drmMMListHead *next = ((drmMMListHead *) iterator)->next;

   return (next == &list->list) ? NULL : next;
}
/*
 * Append a (buf, arg0, arg1) node to the active list, recycling a node
 * from the free list when one is available.  Returns NULL on OOM.
 */
static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
                                 uint64_t arg0,
                                 uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l = list->free.next;

   if (l != &list->free) {
      /* Recycle a cached node. */
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   } else {
      /* Free list exhausted: allocate a fresh node. */
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   }

   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADD(&node->head, &list->list);
   list->numOnList++;
   return node;
}
/*
 * Add 'buf' to a validate list, or merge 'flags'/'mask' into an
 * existing entry for the same buffer.  *newItem is set to 1 only when a
 * new node was appended.  Returns 0 on success, -ENOMEM on allocation
 * failure, or -EINVAL when the requested flags conflict with an entry
 * already on the list.
 */
static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
uint64_t mask, int *newItem)
{
drmBONode *node, *cur;
drmMMListHead *l;
*newItem = 0;
cur = NULL;
/* Linear scan for an existing entry for this buffer. */
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
if (node->buf == buf) {
cur = node;
break;
}
}
if (!cur) {
cur = drmAddListItem(list, buf, flags, mask);
if (!cur) {
return -ENOMEM;
}
*newItem = 1;
cur->arg0 = flags;
cur->arg1 = mask;
}
else {
/* Merge: memory-placement flags are intersected, access flags
 * are unioned; conflicting masked bits are an error. */
uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
return -EINVAL;
}
cur->arg1 |= mask;
cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
/* A mask that requests memory placement must leave at least one
 * memory flag set. */
if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
(cur->arg0 & DRM_BO_MASK_MEM) == 0) {
return -EINVAL;
}
}
return 0;
}
/*
 * Free every node on both the active and the free list, keeping the
 * bookkeeping counters consistent.  The drmBOList header itself is not
 * freed (it is embedded in a DriBufferList).
 */
static void drmBOFreeList(drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
l = list->list.next;
while(l != &list->list) {
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
free(node);
l = list->list.next;
list->numCurrent--;
list->numOnList--;
}
l = list->free.next;
while(l != &list->free) {
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
free(node);
l = list->free.next;
list->numCurrent--;
}
}
/*
 * Grow or shrink the node cache toward list->numTarget.  Growing
 * allocates nodes onto the free list; shrinking only releases surplus
 * free nodes (active nodes are never touched).  Returns 0 or -ENOMEM.
 */
static int drmAdjustListNodes(drmBOList *list)
{
   int ret = 0;

   /* Grow: preallocate nodes until the target is met. */
   while (list->numCurrent < list->numTarget) {
      drmBONode *node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         ret = -ENOMEM;
         break;
      }
      list->numCurrent++;
      DRMLISTADD(&node->head, &list->free);
   }

   /* Shrink: drop surplus nodes, but only from the free list. */
   while (list->numCurrent > list->numTarget) {
      drmMMListHead *l = list->free.next;
      if (l == &list->free)
         break;
      DRMLISTDEL(l);
      free(DRMLISTENTRY(drmBONode, l, head));
      list->numCurrent--;
   }

   return ret;
}
/*
 * Initialize a drmBOList and preallocate numTarget nodes.
 * Returns 0 or -ENOMEM.
 */
static int drmBOCreateList(int numTarget, drmBOList *list)
{
   list->numTarget = numTarget;
   list->numCurrent = 0;
   list->numOnList = 0;
   DRMINITLISTHEAD(&list->list);
   DRMINITLISTHEAD(&list->free);
   return drmAdjustListNodes(list);
}
/*
 * Empty the active list by moving all its nodes back to the free list,
 * re-trimming the node cache before and after.  Returns 0 or -ENOMEM.
 */
static int drmBOResetList(drmBOList *list)
{
   drmMMListHead *l;
   int err = drmAdjustListNodes(list);

   if (err)
      return err;

   while ((l = list->list.next) != &list->list) {
      DRMLISTDEL(l);
      DRMLISTADD(l, &list->free);
      list->numOnList--;
   }

   return drmAdjustListNodes(list);
}
/*
 * Acquire the kernel-BO lock in "write" mode: block until there are no
 * readers, then deliberately RETURN WITH bmMutex STILL HELD.  Must be
 * paired with driWriteUnlockKernelBO(), which releases it.
 */
void driWriteLockKernelBO(void)
{
_glthread_LOCK_MUTEX(bmMutex);
while(kernelReaders != 0)
_glthread_COND_WAIT(bmCond, bmMutex);
}
/*
 * Release the write-mode lock taken by driWriteLockKernelBO() (which
 * returned with bmMutex held).
 */
void driWriteUnlockKernelBO(void)
{
_glthread_UNLOCK_MUTEX(bmMutex);
}
/*
 * Register as a reader of kernel BO state.  bmMutex is only held long
 * enough to bump the counter; readers then proceed concurrently.
 */
void driReadLockKernelBO(void)
{
_glthread_LOCK_MUTEX(bmMutex);
kernelReaders++;
_glthread_UNLOCK_MUTEX(bmMutex);
}
/*
 * Unregister a reader; the last reader out wakes any writer waiting in
 * driWriteLockKernelBO().
 */
void driReadUnlockKernelBO(void)
{
_glthread_LOCK_MUTEX(bmMutex);
if (--kernelReaders == 0)
_glthread_COND_BROADCAST(bmCond);
_glthread_UNLOCK_MUTEX(bmMutex);
}
/*
* TODO: Introduce fence pools in the same way as
* buffer object pools.
*/
/*
 * A user-space buffer-object wrapper.  The actual storage lives in the
 * backing pool; this struct carries refcounting and bookkeeping.
 */
typedef struct _DriBufferObject
{
DriBufferPool *pool; /* backing pool; its function table does the work */
_glthread_Mutex mutex; /* protects the mutable fields below */
int refCount; /* buffer is freed when this drops to zero */
const char *name; /* debug label supplied at creation */
uint64_t flags; /* current DRM_BO_FLAG_* state */
unsigned hint;
unsigned alignment; /* requested alignment, passed to pool->create() */
unsigned createdByReference; /* set when created via driBOSetReferenced() */
void *private; /* pool-specific buffer handle */
/* user-space buffer: */
unsigned userBuffer; /* nonzero: wraps application memory, no pool storage */
void *userData;
unsigned userSize;
} DriBufferObject;
/*
 * Pair of validate lists built during command submission: the kernel
 * drmBOs and the user-space buffer objects that wrap them.
 */
typedef struct _DriBufferList {
drmBOList drmBuffers; /* List of kernel buffers needing validation */
drmBOList driBuffers; /* List of user-space buffers needing validation */
} DriBufferList;
/*
 * Report a fatal memory-manager error and abort.
 *
 * Fix: the old #ifndef NDEBUG / #else conditional was dead code — both
 * branches called abort() — so it has been collapsed to a single call.
 */
void
bmError(int val, const char *file, const char *function, int line)
{
   _mesa_printf("Fatal video memory manager error \"%s\".\n"
                "Check kernel logs or set the LIBGL_DEBUG\n"
                "environment variable to \"verbose\" for more info.\n"
                "Detected in file %s, line %d, function %s.\n",
                strerror(-val), file, line, function);
   abort();
}
/*
 * Return the kernel drmBO backing a buffer object; fatal if the pool
 * cannot provide one.  Lock order: kernel-BO read lock first, then the
 * buffer mutex (see the comment on bmMutex above).
 */
extern drmBO *
driBOKernel(struct _DriBufferObject *buf)
{
drmBO *ret;
driReadLockKernelBO();
_glthread_LOCK_MUTEX(buf->mutex);
assert(buf->private != NULL);
ret = buf->pool->kernel(buf->pool, buf->private);
if (!ret)
BM_CKFATAL(-EINVAL);
_glthread_UNLOCK_MUTEX(buf->mutex);
driReadUnlockKernelBO();
return ret;
}
/*
 * Block until all GPU activity involving this buffer has completed,
 * delegating to the pool's waitIdle hook.
 */
void
driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
{
/*
* This function may block. Is it sane to keep the mutex held during
* that time??
*/
_glthread_LOCK_MUTEX(buf->mutex);
BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
_glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Map a buffer for CPU access.  User-space buffers are already
 * CPU-visible, so their pointer is returned directly.  Returns NULL
 * when the pool map fails.
 */
void *
driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
{
   void *ptr;
   int err;

   if (buf->userBuffer) {
      return buf->userData;
   }

   _glthread_LOCK_MUTEX(buf->mutex);
   assert(buf->private != NULL);
   err = buf->pool->map(buf->pool, buf->private, flags, hint,
                        &buf->mutex, &ptr);
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return (err == 0) ? ptr : NULL;
}
/*
 * Release a CPU mapping.  No-op for user-space buffers, which were
 * never really mapped.
 */
void
driBOUnmap(struct _DriBufferObject *buf)
{
   if (buf->userBuffer)
      return;

   assert(buf->private != NULL);

   _glthread_LOCK_MUTEX(buf->mutex);
   BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * GPU offset of the buffer, as reported by its pool.
 */
unsigned long
driBOOffset(struct _DriBufferObject *buf)
{
   unsigned long offset;

   assert(buf->private != NULL);

   _glthread_LOCK_MUTEX(buf->mutex);
   offset = buf->pool->offset(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return offset;
}
/*
 * Offset of the buffer within its pool's backing store.
 */
unsigned long
driBOPoolOffset(struct _DriBufferObject *buf)
{
   unsigned long offset;

   assert(buf->private != NULL);

   _glthread_LOCK_MUTEX(buf->mutex);
   offset = buf->pool->poolOffset(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return offset;
}
/*
 * Current DRM_BO_FLAG_* state of the buffer.  The flags live in kernel
 * BO state, so the kernel-BO read lock is taken as well.
 */
uint64_t
driBOFlags(struct _DriBufferObject *buf)
{
   uint64_t flags;

   assert(buf->private != NULL);

   driReadLockKernelBO();
   _glthread_LOCK_MUTEX(buf->mutex);
   flags = buf->pool->flags(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);
   driReadUnlockKernelBO();

   return flags;
}
/*
 * Take an extra reference on the buffer and return it.  A
 * post-increment result of 1 means the refcount was zero — someone is
 * resurrecting an already-dead buffer — which is treated as fatal.
 */
struct _DriBufferObject *
driBOReference(struct _DriBufferObject *buf)
{
_glthread_LOCK_MUTEX(buf->mutex);
if (++buf->refCount == 1) {
_glthread_UNLOCK_MUTEX(buf->mutex);
BM_CKFATAL(-EINVAL);
}
_glthread_UNLOCK_MUTEX(buf->mutex);
return buf;
}
/*
 * Drop one reference.  On the last one the buffer is handed back to its
 * pool (unreference() for buffers created via driBOSetReferenced(),
 * destroy() otherwise), the global statistics are updated, and the
 * struct is freed.  NULL is tolerated.
 */
void
driBOUnReference(struct _DriBufferObject *buf)
{
int tmp;
if (!buf)
return;
_glthread_LOCK_MUTEX(buf->mutex);
tmp = --buf->refCount;
if (!tmp) {
/* Last reference: unlock before teardown — no one else can see buf. */
_glthread_UNLOCK_MUTEX(buf->mutex);
if (buf->private) {
if (buf->createdByReference)
buf->pool->unreference(buf->pool, buf->private);
else
buf->pool->destroy(buf->pool, buf->private);
}
if (buf->userBuffer)
num_user_buffers--;
else
num_buffers--;
free(buf);
} else
_glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * (Re)allocate the buffer's backing store and optionally upload 'data'.
 *
 * A new pool buffer is created when none exists yet, when the pool
 * changes, or when the current buffer is too small.  A busy (fenced)
 * buffer is replaced by a fresh one instead of stalling on it.  When
 * only the flags differ, the buffer is converted in place via the
 * pool's setStatus() hook.  Returns 0 or a negative errno value.
 */
int
driBOData(struct _DriBufferObject *buf,
unsigned size, const void *data,
DriBufferPool *newPool,
uint64_t flags)
{
void *virtual = NULL;
int newBuffer;
int retval = 0;
struct _DriBufferPool *pool;
assert(!buf->userBuffer); /* XXX just do a memcpy? */
_glthread_LOCK_MUTEX(buf->mutex);
pool = buf->pool;
/* First driBOData on a freshly generated buffer: adopt the new pool. */
if (pool == NULL && newPool != NULL) {
buf->pool = newPool;
pool = newPool;
}
if (newPool == NULL)
newPool = pool;
if (!pool->create) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"driBOData called on invalid buffer\n");
BM_CKFATAL(-EINVAL);
}
newBuffer = (!buf->private || pool != newPool ||
pool->size(pool, buf->private) < size);
if (!flags)
flags = buf->flags;
if (newBuffer) {
/* Shared (referenced) buffers must never be reallocated. */
if (buf->createdByReference) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"driBOData requiring resizing called on "
"shared buffer.\n");
BM_CKFATAL(-EINVAL);
}
if (buf->private)
buf->pool->destroy(buf->pool, buf->private);
pool = newPool;
buf->pool = newPool;
buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
buf->alignment);
if (!buf->private)
retval = -ENOMEM;
if (retval == 0)
retval = pool->map(pool, buf->private,
DRM_BO_FLAG_WRITE,
DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
} else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
/*
* Buffer is busy. need to create a new one.
*/
void *newBuf;
newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
buf->alignment);
if (newBuf) {
buf->pool->destroy(buf->pool, buf->private);
buf->private = newBuf;
}
/* This map may block: either on the old busy buffer (if the
 * replacement failed) or briefly on the new one. */
retval = pool->map(pool, buf->private,
DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
} else {
uint64_t flag_diff = flags ^ buf->flags;
/*
* We might need to change buffer flags.
*/
if (flag_diff){
assert(pool->setStatus != NULL);
BM_CKFATAL(pool->unmap(pool, buf->private));
BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
buf->flags));
if (!data)
goto out;
retval = pool->map(pool, buf->private,
DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
}
}
if (retval == 0) {
if (data)
memcpy(virtual, data, size);
BM_CKFATAL(pool->unmap(pool, buf->private));
}
out:
_glthread_UNLOCK_MUTEX(buf->mutex);
return retval;
}
/*
 * Upload 'size' bytes from 'data' into the buffer at 'offset'.
 * Silently does nothing when size is zero or data is NULL.
 */
void
driBOSubData(struct _DriBufferObject *buf,
             unsigned long offset, unsigned long size, const void *data)
{
   void *ptr;

   assert(!buf->userBuffer);    /* XXX just do a memcpy? */

   _glthread_LOCK_MUTEX(buf->mutex);
   if (size != 0 && data != NULL) {
      BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
                                DRM_BO_FLAG_WRITE, 0, &buf->mutex,
                                &ptr));
      memcpy((unsigned char *) ptr + offset, data, size);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   }
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Read back 'size' bytes from the buffer at 'offset' into 'data'.
 * Silently does nothing when size is zero or data is NULL.
 */
void
driBOGetSubData(struct _DriBufferObject *buf,
                unsigned long offset, unsigned long size, void *data)
{
   void *ptr;

   assert(!buf->userBuffer);    /* XXX just do a memcpy? */

   _glthread_LOCK_MUTEX(buf->mutex);
   if (size != 0 && data != NULL) {
      BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
                                DRM_BO_FLAG_READ, 0, &buf->mutex, &ptr));
      memcpy(data, (unsigned char *) ptr + offset, size);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   }
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Attach this (still storage-less) buffer object to an existing kernel
 * buffer identified by its global handle (e.g. the sarea's
 * front_bo_handle).  The buffer's flags are taken over from the kernel
 * BO.  Misuse or failure is fatal.
 */
void
driBOSetReferenced(struct _DriBufferObject *buf,
unsigned long handle)
{
_glthread_LOCK_MUTEX(buf->mutex);
if (buf->private != NULL) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"Invalid buffer for setReferenced\n");
BM_CKFATAL(-EINVAL);
}
if (buf->pool->reference == NULL) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"Invalid buffer pool for setReferenced\n");
BM_CKFATAL(-EINVAL);
}
buf->private = buf->pool->reference(buf->pool, handle);
if (!buf->private) {
_mesa_error(NULL, GL_OUT_OF_MEMORY,
"Invalid buffer pool for setStatic\n");
BM_CKFATAL(-ENOMEM);
}
buf->createdByReference = GL_TRUE;
buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
_glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Create n buffer objects in 'buffers'.  No pool storage is allocated
 * here; that happens lazily (e.g. in driBOData()).
 *
 * Returns 0 on success or -ENOMEM when a buffer struct cannot be
 * allocated (buffers created so far remain valid in 'buffers').
 *
 * Fix: the num_buffers statistic is now incremented once per buffer
 * actually created, matching the per-buffer decrement in
 * driBOUnReference(); previously it was bumped once per call.
 */
int
driGenBuffers(struct _DriBufferPool *pool,
              const char *name,
              unsigned n,
              struct _DriBufferObject *buffers[],
              unsigned alignment, uint64_t flags, unsigned hint)
{
   struct _DriBufferObject *buf;
   unsigned i;

   /* Default placement: anywhere, read/write. */
   flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
      DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;

   assert(pool);

   for (i = 0; i < n; ++i) {
      buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
      if (!buf)
         return -ENOMEM;

      _glthread_INIT_MUTEX(buf->mutex);
      _glthread_LOCK_MUTEX(buf->mutex);
      buf->refCount = 1;
      buf->flags = flags;
      buf->hint = hint;
      buf->name = name;
      buf->alignment = alignment;
      buf->pool = pool;
      buf->createdByReference = 0;
      _glthread_UNLOCK_MUTEX(buf->mutex);

      buffers[i] = buf;
      ++num_buffers;    /* balanced by --num_buffers in driBOUnReference() */
   }

   return 0;
}
/*
 * Wrap an application-supplied memory area in a buffer object.  The
 * result is flagged userBuffer, so map/unmap just hand back 'ptr'.
 * NOTE(review): the return value of driGenBuffers() is ignored — on
 * allocation failure *buffers is left unset and the writes below would
 * dereference an invalid pointer.  Confirm OOM cannot occur here.
 */
void
driGenUserBuffer(struct _DriBufferPool *pool,
const char *name,
struct _DriBufferObject **buffers,
void *ptr, unsigned bytes)
{
const unsigned alignment = 1, flags = 0, hint = 0;
--num_buffers; /* JB: is incremented in driGenBuffers */
driGenBuffers(pool, name, 1, buffers, alignment, flags, hint);
++num_user_buffers;
(*buffers)->userBuffer = 1;
(*buffers)->userData = ptr;
(*buffers)->userSize = bytes;
}
/*
 * Drop one reference on each of the n buffers; any buffer whose
 * refcount reaches zero is freed by driBOUnReference().
 */
void
driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
{
   unsigned i;

   for (i = 0; i < n; i++)
      driBOUnReference(buffers[i]);
}
/*
 * Global buffer-manager initialization hook.  Nothing is currently
 * required; kept for API symmetry.
 */
void
driInitBufMgr(int fd)
{
   (void) fd;
}
/*
* Note that lists are per-context and don't need mutex protection.
*/
struct _DriBufferList *
driBOCreateList(int target)
{
struct _DriBufferList *list = calloc(sizeof(*list), 1);
BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
return list;
}
/*
 * Empty both validate lists.  The user-space list is only reset when
 * resetting the kernel list succeeded.  Returns 0 or a negative errno.
 */
int
driBOResetList(struct _DriBufferList * list)
{
   int err = drmBOResetList(&list->drmBuffers);

   if (err == 0)
      err = drmBOResetList(&list->driBuffers);

   return err;
}
/*
 * Destroy both validate lists and the list container itself.
 */
void
driBOFreeList(struct _DriBufferList * list)
{
   drmBOFreeList(&list->driBuffers);
   drmBOFreeList(&list->drmBuffers);
   free(list);
}
/*
* Copied from libdrm, because it is needed by driAddValidateItem.
*/
/*
 * Local variant of drmAddListItem: additionally zeroes the node's ioctl
 * argument block and appends at the TAIL so submission order is kept.
 * Returns NULL on OOM.
 */
static drmBONode *
driAddListItem(drmBOList * list, drmBO * item,
               uint64_t arg0, uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l = list->free.next;

   if (l != &list->free) {
      /* Recycle a node from the free list. */
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   } else {
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   }

   memset(&node->bo_arg, 0, sizeof(node->bo_arg));
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADDTAIL(&node->head, &list->list);
   list->numOnList++;
   return node;
}
/*
* Slightly modified version compared to the libdrm version.
* This one returns the list index of the buffer put on the list.
*/
/*
 * Add 'buf' to the validate list, or merge 'flags'/'mask' into an
 * existing entry.  Unlike the libdrm version above, *itemLoc receives
 * the list index of the matched or appended node and *pnode the node
 * itself.  Returns 0, -ENOMEM, or -EINVAL on conflicting flags.
 */
static int
driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
uint64_t mask, int *itemLoc,
struct _drmBONode **pnode)
{
drmBONode *node, *cur;
drmMMListHead *l;
int count = 0;
cur = NULL;
/* Scan for an existing entry; count tracks the index reached. */
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
if (node->buf == buf) {
cur = node;
break;
}
count++;
}
if (!cur) {
/* Appended at the tail, so its index equals the scan count. */
cur = driAddListItem(list, buf, flags, mask);
if (!cur)
return -ENOMEM;
cur->arg0 = flags;
cur->arg1 = mask;
} else {
/* Merge: memory flags intersect, access flags union. */
uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
return -EINVAL;
}
cur->arg1 |= mask;
cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
(cur->arg0 & DRM_BO_MASK_MEM) == 0) {
return -EINVAL;
}
}
*itemLoc = count;
*pnode = cur;
return 0;
}
/*
 * Queue a buffer on both validate lists: its kernel BO on drmBuffers,
 * and the DriBufferObject itself — cast to drmBO * and used purely as
 * an opaque pointer — on driBuffers.  The buffer gains a reference the
 * first time it lands on the list; itemLoc/node report its position in
 * the kernel list.
 */
void
driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
uint64_t flags, uint64_t mask, int *itemLoc,
struct _drmBONode **node)
{
int newItem;
_glthread_LOCK_MUTEX(buf->mutex);
BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
buf->pool->kernel(buf->pool, buf->private),
flags, mask, itemLoc, node));
BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
flags, mask, &newItem));
/* First time on the list: hold a reference until fencing/unref. */
if (newItem)
buf->refCount++;
_glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Return the kernel (drm) buffer sub-list of "list".
 *
 * Takes the global kernel-BO write lock; the caller must release it with
 * driPutdrmBOList() when finished with the returned list.
 */
drmBOList *driGetdrmBOList(struct _DriBufferList *list)
{
   driWriteLockKernelBO();
   return &list->drmBuffers;
}
/*
 * Release the kernel-BO write lock taken by driGetdrmBOList().
 * The "list" argument is unused; kept for API symmetry.
 */
void driPutdrmBOList(struct _DriBufferList *list)
{
   driWriteUnlockKernelBO();
}
/*
 * Attach "fence" to "buf" via the buffer pool's fence callback, if the
 * pool provides one (pools where the kernel handles fencing leave it
 * NULL or make it a no-op).
 */
void
driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
{
   _glthread_LOCK_MUTEX(buf->mutex);
   if (buf->pool->fence)
      BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));

   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Drop the reference that driBOAddListItem() took on every buffer on the
 * user-space (dri) sub-list of "list".
 */
void
driBOUnrefUserList(struct _DriBufferList *list)
{
   void *iter;

   for (iter = drmBOListIterator(&list->driBuffers);
        iter != NULL;
        iter = drmBOListNext(&list->driBuffers, iter)) {
      /* The dri list stores DriBufferObject pointers in the drmBO slot. */
      driBOUnReference((struct _DriBufferObject *) drmBOListBuf(iter));
   }
}
/*
 * Wrap the kernel fence "kFence" in a _DriFenceObject, attach it to every
 * buffer on the user-space sub-list of "list", drop the list's buffer
 * references and reset the list.  Returns the new fence (referenced for
 * the caller).
 *
 * NOTE(review): "name" is currently unused.  Also, driFenceCreate() can
 * return NULL, which is then passed to driBOFence() -- verify the pool
 * fence callbacks tolerate a NULL fence.
 */
struct _DriFenceObject *
driBOFenceUserList(struct _DriFenceMgr *mgr,
                   struct _DriBufferList *list, const char *name,
                   drmFence *kFence)
{
   struct _DriFenceObject *fence;
   struct _DriBufferObject *buf;
   void *curBuf;

   fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
                          kFence, sizeof(*kFence));
   curBuf = drmBOListIterator(&list->driBuffers);

   /*
    * User-space fencing callbacks.
    */

   while (curBuf) {
      buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
      driBOFence(buf, fence);
      driBOUnReference(buf);
      curBuf = drmBOListNext(&list->driBuffers, curBuf);
   }

   driBOResetList(list);
   return fence;
}
/*
 * Run the pool "validate" callback for every buffer on the user-space
 * sub-list of "list".  Pools without user-space validation leave the
 * callback NULL and are skipped.
 */
void
driBOValidateUserList(struct _DriBufferList * list)
{
   void *curBuf;
   struct _DriBufferObject *buf;

   curBuf = drmBOListIterator(&list->driBuffers);

   /*
    * User-space validation callbacks.
    */

   while (curBuf) {
      buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
      _glthread_LOCK_MUTEX(buf->mutex);
      if (buf->pool->validate)
         /* The callback receives the buffer mutex so it may drop and
          * retake it while blocking. */
         BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
      _glthread_UNLOCK_MUTEX(buf->mutex);
      curBuf = drmBOListNext(&list->driBuffers, curBuf);
   }
}
/*
 * Destroy a buffer pool through its own takeDown callback.
 * The pool object is invalid after this call.
 */
void
driPoolTakeDown(struct _DriBufferPool *pool)
{
   pool->takeDown(pool);
}
/*
 * Return the size in bytes of "buf", as reported by its pool.
 * Serialized against other accessors by the buffer mutex.
 */
unsigned long
driBOSize(struct _DriBufferObject *buf)
{
   unsigned long result;

   _glthread_LOCK_MUTEX(buf->mutex);
   result = buf->pool->size(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return result;
}
/*
 * Raw accessor for the kernel (drm) sub-list; unlike driGetdrmBOList()
 * this takes no lock.  Exported for debugging (see header).
 */
drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
{
   return &list->drmBuffers;
}
/*
 * Raw accessor for the user-space (dri) sub-list; no locking.
 * Exported for debugging (see header).
 */
drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
{
   return &list->driBuffers;
}
@@ -0,0 +1,138 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#ifndef _PSB_BUFMGR_H_
#define _PSB_BUFMGR_H_
#include <xf86mm.h>
#include "i915_drm.h"
#include "ws_dri_fencemgr.h"
/*
 * One entry on a drmBOList.  arg0/arg1 hold the merged validation
 * flags/mask for the buffer (see driAddValidateItem in ws_dri_bufmgr.c).
 */
typedef struct _drmBONode
{
   drmMMListHead head;           /* link in drmBOList::list or ::free */
   drmBO *buf;                   /* the buffer this node refers to */
   struct drm_i915_op_arg bo_arg; /* i915 execbuffer op argument */
   uint64_t arg0;                /* requested flags */
   uint64_t arg1;                /* flag mask */
} drmBONode;
/*
 * A list of buffer objects awaiting validation, with a free pool of
 * recycled nodes to avoid per-item malloc traffic.
 */
typedef struct _drmBOList {
   unsigned numTarget;   /* desired number of pre-allocated nodes */
   unsigned numCurrent;  /* nodes allocated in total (on list + free) */
   unsigned numOnList;   /* nodes currently on the active list */
   drmMMListHead list;   /* active entries */
   drmMMListHead free;   /* recycled, currently unused nodes */
} drmBOList;
struct _DriFenceObject;
struct _DriBufferObject;
struct _DriBufferPool;
struct _DriBufferList;
/*
* Return a pointer to the libdrm buffer object this DriBufferObject
* uses.
*/
extern drmBO *driBOKernel(struct _DriBufferObject *buf);
extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
unsigned hint);
extern void driBOUnmap(struct _DriBufferObject *buf);
extern unsigned long driBOOffset(struct _DriBufferObject *buf);
extern unsigned long driBOPoolOffset(struct _DriBufferObject *buf);
extern uint64_t driBOFlags(struct _DriBufferObject *buf);
extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
extern void driBOUnReference(struct _DriBufferObject *buf);
extern int driBOData(struct _DriBufferObject *r_buf,
unsigned size, const void *data,
struct _DriBufferPool *pool, uint64_t flags);
extern void driBOSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size,
const void *data);
extern void driBOGetSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size,
void *data);
extern int driGenBuffers(struct _DriBufferPool *pool,
const char *name,
unsigned n,
struct _DriBufferObject *buffers[],
unsigned alignment, uint64_t flags, unsigned hint);
extern void driGenUserBuffer(struct _DriBufferPool *pool,
const char *name,
struct _DriBufferObject *buffers[],
void *ptr, unsigned bytes);
extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
extern void driInitBufMgr(int fd);
extern struct _DriBufferList *driBOCreateList(int target);
extern int driBOResetList(struct _DriBufferList * list);
extern void driBOAddListItem(struct _DriBufferList * list,
struct _DriBufferObject *buf,
uint64_t flags, uint64_t mask, int *itemLoc,
struct _drmBONode **node);
extern void driBOValidateList(int fd, struct _DriBufferList * list);
extern void driBOFreeList(struct _DriBufferList * list);
extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
struct _DriBufferList *list,
const char *name,
drmFence *kFence);
extern void driBOUnrefUserList(struct _DriBufferList *list);
extern void driBOValidateUserList(struct _DriBufferList * list);
extern drmBOList *driGetdrmBOList(struct _DriBufferList *list);
extern void driPutdrmBOList(struct _DriBufferList *list);
extern void driBOFence(struct _DriBufferObject *buf,
                       struct _DriFenceObject *fence);
extern void driPoolTakeDown(struct _DriBufferPool *pool);
extern void driBOSetReferenced(struct _DriBufferObject *buf,
                               unsigned long handle);
/* "extern" added for consistency with the surrounding declarations. */
extern unsigned long driBOSize(struct _DriBufferObject *buf);
extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);

/*
 * Global reader/writer lock guarding kernel buffer-object state.
 * (A duplicate driPoolTakeDown declaration was removed here.)
 */
extern void driReadLockKernelBO(void);
extern void driReadUnlockKernelBO(void);
extern void driWriteLockKernelBO(void);
extern void driWriteUnlockKernelBO(void);
/*
* For debugging purposes.
*/
extern drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list);
extern drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list);
#endif
@@ -0,0 +1,102 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _PSB_BUFPOOL_H_
#define _PSB_BUFPOOL_H_
#include <xf86drm.h>
#include <glthread.h>
struct _DriFenceObject;
/*
 * Virtual table for a buffer pool backend.  Each callback receives the
 * pool and the backend-private per-buffer handle ("private") returned by
 * create()/reference().  Optional callbacks (validate, setStatus, fence)
 * may be NULL; callers must check before invoking them.
 */
typedef struct _DriBufferPool
{
   int fd;                      /* drm file descriptor, -1 if unused */
   /* Map the buffer into CPU-visible memory; result through *virtual.
    * "mutex" lets the backend drop the buffer lock while blocking. */
   int (*map) (struct _DriBufferPool * pool, void *private,
               unsigned flags, int hint, _glthread_Mutex *mutex,
               void **virtual);
   int (*unmap) (struct _DriBufferPool * pool, void *private);
   int (*destroy) (struct _DriBufferPool * pool, void *private);
   /* Aperture/card offset of the buffer. */
   unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
   /* Offset of the buffer within its pool storage. */
   unsigned long (*poolOffset) (struct _DriBufferPool * pool, void *private);
   uint64_t (*flags) (struct _DriBufferPool * pool, void *private);
   unsigned long (*size) (struct _DriBufferPool * pool, void *private);
   /* Allocate a new buffer; returns the backend-private handle. */
   void *(*create) (struct _DriBufferPool * pool, unsigned long size,
                    uint64_t flags, unsigned hint, unsigned alignment);
   /* Reference an existing (shared) buffer by its global handle. */
   void *(*reference) (struct _DriBufferPool * pool, unsigned handle);
   int (*unreference) (struct _DriBufferPool * pool, void *private);
   /* Attach a user-space fence; NULL when the kernel fences. */
   int (*fence) (struct _DriBufferPool * pool, void *private,
                 struct _DriFenceObject * fence);
   /* Underlying kernel drmBO for validation lists. */
   drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
   int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
   int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
                    int lazy);
   int (*setStatus) (struct _DriBufferPool *pool, void *private,
                     uint64_t flag_diff, uint64_t old_flags);
   /* Destroy the pool itself. */
   void (*takeDown) (struct _DriBufferPool * pool);
   void *data;                  /* backend-private pool data */
} DriBufferPool;
extern void bmError(int val, const char *file, const char *function,
int line);
/*
 * Evaluate "val" once and report a fatal buffer-manager error (with
 * source location) when it is non-zero.
 *
 * Fix: removed the trailing semicolon after while(0).  With it, the
 * macro expanded to two statements, which breaks single-statement use
 * such as "if (x) BM_CKFATAL(e); else ...".  The caller now supplies
 * the terminating semicolon, as the do/while(0) idiom intends.
 */
#define BM_CKFATAL(val)                                        \
   do {                                                        \
      int tstVal = (val);                                      \
      if (tstVal)                                              \
         bmError(tstVal, __FILE__, __FUNCTION__, __LINE__);    \
   } while(0)
/*
* Builtin pools.
*/
/*
* Kernel buffer objects. Size in multiples of page size. Page size aligned.
*/
extern struct _DriBufferPool *driDRMPoolInit(int fd);
extern struct _DriBufferPool *driMallocPoolInit(void);
struct _DriFreeSlabManager;
extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
uint64_t validMask,
uint32_t smallestSize,
uint32_t numSizes,
uint32_t desiredNumBuffers,
uint32_t maxSlabSize,
uint32_t pageAlignment,
struct _DriFreeSlabManager *fMan);
extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
extern struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
#endif
@@ -0,0 +1,268 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <unistd.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "assert.h"
/*
* Buffer pool implementation using DRM buffer objects as DRI buffer objects.
*/
/*
 * Allocate a kernel buffer object of "size" bytes.  "alignment" larger
 * than a page must be a whole multiple of the page size; it is passed to
 * the kernel in units of pages.  Returns the drmBO handle, or NULL on
 * failure.
 */
static void *
pool_create(struct _DriBufferPool *pool,
            unsigned long size, uint64_t flags, unsigned hint,
            unsigned alignment)
{
   drmBO *buf = (drmBO *) malloc(sizeof(*buf));
   int ret;
   unsigned pageSize = getpagesize();

   if (!buf)
      return NULL;

   /* Sub-page alignments are implicit; larger ones must be page-exact. */
   if ((alignment > pageSize) && (alignment % pageSize)) {
      free(buf);
      return NULL;
   }

   ret = drmBOCreate(pool->fd, size, alignment / pageSize,
                     NULL,
                     flags, hint, buf);
   if (ret) {
      free(buf);
      return NULL;
   }

   return (void *) buf;
}
/*
 * Reference an existing kernel buffer object by its global handle.
 * Returns a freshly allocated drmBO wrapper, or NULL on failure.
 */
static void *
pool_reference(struct _DriBufferPool *pool, unsigned handle)
{
   drmBO *bo = malloc(sizeof *bo);

   if (bo == NULL)
      return NULL;

   if (drmBOReference(pool->fd, handle, bo) != 0) {
      free(bo);
      return NULL;
   }

   return bo;
}
/*
 * Destroy a buffer: for kernel BOs this is just dropping our reference;
 * the kernel frees the object when the last reference goes away.
 * (Intentionally identical to pool_unreference below.)
 */
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   int ret;
   drmBO *buf = (drmBO *) private;
   driReadLockKernelBO();
   ret = drmBOUnreference(pool->fd, buf);
   free(buf);
   driReadUnlockKernelBO();
   return ret;
}
/*
 * Drop our reference to a shared kernel buffer object and release the
 * local wrapper.  Mirrors pool_destroy.
 */
static int
pool_unreference(struct _DriBufferPool *pool, void *private)
{
   drmBO *bo = (drmBO *) private;
   int err;

   driReadLockKernelBO();
   err = drmBOUnreference(pool->fd, bo);
   driReadUnlockKernelBO();
   free(bo);

   return err;
}
/*
 * Map the kernel buffer object into CPU address space; the mapping is
 * returned through *virtual.  The "mutex" parameter is unused here: the
 * kernel blocks as needed during drmBOMap.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, _glthread_Mutex *mutex, void **virtual)
{
   drmBO *buf = (drmBO *) private;
   int ret;

   driReadLockKernelBO();
   ret = drmBOMap(pool->fd, buf, flags, hint, virtual);
   driReadUnlockKernelBO();
   return ret;
}
/*
 * Unmap a previously mapped kernel buffer object.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   int err;

   driReadLockKernelBO();
   err = drmBOUnmap(pool->fd, (drmBO *) private);
   driReadUnlockKernelBO();

   return err;
}
/*
 * Return the aperture offset of the buffer.  The buffer must be pinned
 * (DRM_BO_FLAG_NO_MOVE) for the offset to be stable.
 *
 * Fix: return the snapshot taken while holding the kernel-BO read lock.
 * The original returned a second, unlocked read of buf->offset, making
 * the locked read pointless and the result racy.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   drmBO *buf = (drmBO *) private;
   unsigned long offset;

   driReadLockKernelBO();
   assert(buf->flags & DRM_BO_FLAG_NO_MOVE);
   offset = buf->offset;
   driReadUnlockKernelBO();

   return offset;
}
/*
 * Kernel BOs are not sub-allocated, so the offset within the "pool
 * storage" is always zero.
 */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   return 0;
}
/*
 * Return the buffer's current placement/access flags, read under the
 * kernel-BO read lock.
 */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   const drmBO *bo = (const drmBO *) private;
   uint64_t result;

   driReadLockKernelBO();
   result = bo->flags;
   driReadUnlockKernelBO();

   return result;
}
/*
 * Return the buffer's size in bytes.
 *
 * Fix: return the snapshot taken under the kernel-BO read lock instead
 * of re-reading buf->size after the lock is dropped (same defect as the
 * original pool_offset).
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   drmBO *buf = (drmBO *) private;
   unsigned long size;

   driReadLockKernelBO();
   size = buf->size;
   driReadUnlockKernelBO();

   return size;
}
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   /*
    * Noop. The kernel handles all fencing.
    */

   return 0;
}
/*
 * The private handle already is the kernel drmBO for this pool.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   return (drmBO *) private;
}
/*
 * Block until the kernel buffer object is idle.  "lazy" allows the
 * kernel to wait with reduced polling frequency.
 */
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
              int lazy)
{
   unsigned waitHint = lazy ? DRM_BO_HINT_WAIT_LAZY : 0;
   drmBO *bo = (drmBO *) private;
   int err;

   driReadLockKernelBO();
   err = drmBOWaitIdle(pool->fd, bo, waitHint);
   driReadUnlockKernelBO();

   return err;
}
/*
 * Destroy the pool object itself.  Buffers must already be released.
 */
static void
pool_takedown(struct _DriBufferPool *pool)
{
   free(pool);
}
/*static int
pool_setStatus(struct _DriBufferPool *pool, void *private,
uint64_t flag_diff, uint64_t old_flags)
{
drmBO *buf = (drmBO *) private;
uint64_t new_flags = old_flags ^ flag_diff;
int ret;
driReadLockKernelBO();
ret = drmBOSetStatus(pool->fd, buf, new_flags, flag_diff,
0, 0, 0);
driReadUnlockKernelBO();
return ret;
}*/
/*
 * Create a buffer pool backed directly by kernel (drm) buffer objects.
 * Returns NULL on allocation failure.
 *
 * Fix: pool->setStatus was never initialized, leaving an indeterminate
 * function pointer in the vtable; it is now explicitly NULL (the
 * pool_setStatus implementation above is commented out).
 */
struct _DriBufferPool *
driDRMPoolInit(int fd)
{
   struct _DriBufferPool *pool;

   pool = (struct _DriBufferPool *) malloc(sizeof(*pool));

   if (!pool)
      return NULL;

   pool->fd = fd;
   pool->map = &pool_map;
   pool->unmap = &pool_unmap;
   pool->destroy = &pool_destroy;
   pool->offset = &pool_offset;
   pool->poolOffset = &pool_poolOffset;
   pool->flags = &pool_flags;
   pool->size = &pool_size;
   pool->create = &pool_create;
   pool->fence = &pool_fence;
   pool->kernel = &pool_kernel;
   pool->validate = NULL;
   pool->setStatus = NULL;
   pool->waitIdle = &pool_waitIdle;
   pool->takeDown = &pool_takedown;
   pool->reference = &pool_reference;
   pool->unreference = &pool_unreference;
   pool->data = NULL;
   return pool;
}
@@ -0,0 +1,377 @@
#include "ws_dri_fencemgr.h"
#include "glthread.h"
#include <xf86mm.h>
#include <string.h>
#include <unistd.h>
/*
* Note: Locking order is
* _DriFenceObject::mutex
* _DriFenceMgr::mutex
*/
struct _DriFenceMgr {
   /*
    * Constant members. Need no mutex protection.
    */
   struct _DriFenceMgrCreateInfo info;  /* backend callbacks and class count */
   void *private;                       /* backend data (e.g. the drm fd) */

   /*
    * These members are protected by this->mutex
    */
   _glthread_Mutex mutex;
   int refCount;           /* 1 for the creator + 1 per live fence */
   drmMMListHead *heads;   /* one fence list per fence class
                            * (info.num_classes entries) */
   int num_fences;         /* fences currently tracked */
};
struct _DriFenceObject {
   /*
    * These members are constant and need no mutex protection.
    */
   struct _DriFenceMgr *mgr;   /* owning manager (NULLed on destruction) */
   uint32_t fence_class;       /* index into mgr->heads */
   uint32_t fence_type;        /* types this fence will eventually signal */

   /*
    * These members are protected by mgr->mutex.
    */
   drmMMListHead head;         /* position on mgr->heads[fence_class];
                                * creation order gives signal order */
   int refCount;

   /*
    * These members are protected by this->mutex.
    */
   _glthread_Mutex mutex;
   uint32_t signaled_type;     /* types known to have signaled so far */
   void *private;              /* backend fence data (e.g. drmFence copy) */
};
/*
 * Return the fence-type bitmask this fence will eventually signal.
 * Constant after creation, so no locking is needed.
 */
uint32_t
driFenceType(struct _DriFenceObject *fence)
{
   return fence->fence_type;
}
/*
 * Create a fence manager from "info" (callbacks and number of fence
 * classes).  Returns NULL on allocation failure.
 *
 * Fixes vs. the original error path: the manager's mutex is unlocked
 * before the manager is freed (freeing storage that contains a locked
 * mutex is undefined on most threading backends), and the redundant
 * "if (tmp)" guard before free() is gone -- tmp is always non-NULL here.
 */
struct _DriFenceMgr *
driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
{
   struct _DriFenceMgr *tmp;
   uint32_t i;

   tmp = calloc(1, sizeof(*tmp));
   if (!tmp)
      return NULL;

   _glthread_INIT_MUTEX(tmp->mutex);
   _glthread_LOCK_MUTEX(tmp->mutex);
   tmp->refCount = 1;
   tmp->info = *info;
   tmp->num_fences = 0;
   tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
   if (!tmp->heads)
      goto out_err;

   for (i = 0; i < tmp->info.num_classes; ++i) {
      DRMINITLISTHEAD(&tmp->heads[i]);
   }
   _glthread_UNLOCK_MUTEX(tmp->mutex);
   return tmp;

 out_err:
   _glthread_UNLOCK_MUTEX(tmp->mutex);
   free(tmp);
   return NULL;
}
/*
 * Drop one manager reference; the caller must hold mgr->mutex.  On the
 * last reference the manager is destroyed, otherwise the mutex is
 * released.  *pMgr is reset to NULL either way.
 *
 * Fixes: the per-class list-head array (allocated in driFenceMgrCreate)
 * was leaked, and the manager was freed with its mutex still locked.
 * With refCount at zero no other thread can touch the manager, so
 * unlocking first is safe.
 */
static void
driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
{
   struct _DriFenceMgr *mgr = *pMgr;

   *pMgr = NULL;
   if (--mgr->refCount == 0) {
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      free(mgr->heads);
      free(mgr);
   } else {
      _glthread_UNLOCK_MUTEX(mgr->mutex);
   }
}
/*
 * Public manager unreference: takes the manager mutex and delegates to
 * driFenceMgrUnrefUnlock(), which releases it (or frees the manager).
 */
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
{
   _glthread_LOCK_MUTEX((*pMgr)->mutex);
   driFenceMgrUnrefUnlock(pMgr);
}
/*
 * Drop one fence reference; the caller must hold mgr->mutex.  On the
 * last reference the fence is unlinked from its class list, its backend
 * data released, and the fence's reference on the manager dropped.
 * *pFence is reset to NULL.
 */
static void
driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
{
   struct _DriFenceObject *fence = *pFence;
   struct _DriFenceMgr *mgr = fence->mgr;

   *pFence = NULL;
   if (--fence->refCount == 0) {
      DRMLISTDELINIT(&fence->head);
      /* private may already be NULL if the fence fully signaled earlier
       * (see driSignalPreviousFencesLocked). */
      if (fence->private)
         mgr->info.unreference(mgr, &fence->private);
      --mgr->num_fences;
      fence->mgr = NULL;
      /* Note: only adjusts the count; the caller still holds mgr->mutex
       * and is responsible for the manager's final teardown path. */
      --mgr->refCount;
      free(fence);
   }
}
/*
 * Walk backwards from "list" towards the head of the class list, marking
 * each fence as having signaled "fence_type".  Because fences on a class
 * list are in creation order (and, for ordered classes, signal in that
 * order), everything created before a signaled fence is signaled too.
 *
 * Called with mgr->mutex held.  Lock order is fence->mutex before
 * mgr->mutex, so for each entry the manager mutex is dropped and
 * retaken after the fence mutex -- the extra reference keeps the entry
 * alive across that window.
 */
static void
driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
                              drmMMListHead *list,
                              uint32_t fence_class,
                              uint32_t fence_type)
{
   struct _DriFenceObject *entry;
   drmMMListHead *prev;

   while(list != &mgr->heads[fence_class]) {
      entry = DRMLISTENTRY(struct _DriFenceObject, list, head);

      /*
       * Up refcount so that entry doesn't disappear from under us
       * when we unlock-relock mgr to get the correct locking order.
       */

      ++entry->refCount;
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      _glthread_LOCK_MUTEX(entry->mutex);
      _glthread_LOCK_MUTEX(mgr->mutex);

      /* Capture the predecessor before this entry can be unlinked. */
      prev = list->prev;

      if (list->prev == list) {

         /*
          * Somebody else removed the entry from the list.
          */

         _glthread_UNLOCK_MUTEX(entry->mutex);
         driFenceUnReferenceLocked(&entry);
         return;
      }

      entry->signaled_type |= (fence_type & entry->fence_type);
      if (entry->signaled_type == entry->fence_type) {
         /* Fully signaled: unlink and release the backend fence data. */
         DRMLISTDELINIT(list);
         mgr->info.unreference(mgr, &entry->private);
      }
      _glthread_UNLOCK_MUTEX(entry->mutex);
      driFenceUnReferenceLocked(&entry);
      list = prev;
   }
}
/*
 * Wait until "fence" has signaled all types in "fence_type".  Returns 0
 * on success or the backend finish() error.  On success, this fence and
 * all earlier fences of its class are marked signaled.
 */
int
driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
               int lazy_hint)
{
   struct _DriFenceMgr *mgr = fence->mgr;
   int ret = 0;

   _glthread_LOCK_MUTEX(fence->mutex);

   /* Fast path: already known to have signaled the requested types. */
   if ((fence->signaled_type & fence_type) == fence_type)
      goto out0;

   ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
   if (ret)
      goto out0;

   /* Lock order: fence->mutex, then mgr->mutex; the fence mutex can be
    * dropped once the manager mutex protects the list walk. */
   _glthread_LOCK_MUTEX(mgr->mutex);
   _glthread_UNLOCK_MUTEX(fence->mutex);

   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 fence_type);
   _glthread_UNLOCK_MUTEX(mgr->mutex);
   return 0;

 out0:
   _glthread_UNLOCK_MUTEX(fence->mutex);
   return ret;
}
/*
 * Return the cached set of signaled types without querying the backend.
 * May under-report (false negatives) but never over-report.
 */
uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
{
   uint32_t cached;

   _glthread_LOCK_MUTEX(fence->mutex);
   cached = fence->signaled_type;
   _glthread_UNLOCK_MUTEX(fence->mutex);

   return cached;
}
/*
 * Query the backend for the fence's signaled types, asking it to flush
 * "flush_type".  The result is returned through *signaled; on backend
 * error the cached value is returned instead along with the error code.
 * Newly discovered signals are propagated to earlier fences.
 */
int
driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
                     uint32_t *signaled)
{
   int ret = 0;
   struct _DriFenceMgr *mgr;

   _glthread_LOCK_MUTEX(fence->mutex);
   mgr = fence->mgr;
   *signaled = fence->signaled_type;

   /* Fast path: the cache already covers the requested types. */
   if ((fence->signaled_type & flush_type) == flush_type)
      goto out0;

   ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
   if (ret) {
      *signaled = fence->signaled_type;
      goto out0;
   }

   /* Nothing new signaled: no list update needed. */
   if ((fence->signaled_type | *signaled) == fence->signaled_type)
      goto out0;

   /* Lock order: fence->mutex first, then mgr->mutex. */
   _glthread_LOCK_MUTEX(mgr->mutex);
   _glthread_UNLOCK_MUTEX(fence->mutex);

   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 *signaled);

   _glthread_UNLOCK_MUTEX(mgr->mutex);
   return 0;

 out0:
   _glthread_UNLOCK_MUTEX(fence->mutex);
   return ret;
}
/*
 * Take an extra reference on "fence" and return it.  The reference count
 * is protected by the *manager's* mutex (not the fence's own), matching
 * driFenceUnReferenceLocked().
 */
struct _DriFenceObject *
driFenceReference(struct _DriFenceObject *fence)
{
   _glthread_LOCK_MUTEX(fence->mgr->mutex);
   ++fence->refCount;
   _glthread_UNLOCK_MUTEX(fence->mgr->mutex);
   return fence;
}
/*
 * Drop a fence reference; *pFence is reset to NULL.  NULL input is a
 * no-op.  A temporary manager reference is taken so the manager cannot
 * be destroyed while the fence unref runs; driFenceMgrUnrefUnlock drops
 * it again and releases the mutex.
 */
void
driFenceUnReference(struct _DriFenceObject **pFence)
{
   struct _DriFenceMgr *mgr;

   if (*pFence == NULL)
      return;

   mgr = (*pFence)->mgr;
   _glthread_LOCK_MUTEX(mgr->mutex);
   ++mgr->refCount;
   driFenceUnReferenceLocked(pFence);
   driFenceMgrUnrefUnlock(&mgr);
}
/*
 * Create a fence in class "fence_class" that will signal "fence_type".
 * If private_size is nonzero, the data at "private" is copied into a
 * 16-byte-aligned area trailing the fence object; otherwise the pointer
 * itself is stored.  Returns NULL on allocation failure -- in that case
 * the backend fence is synchronously finished so it is not leaked.
 */
struct _DriFenceObject
*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
                uint32_t fence_type, void *private, size_t private_size)
{
   struct _DriFenceObject *fence;
   size_t fence_size = sizeof(*fence);

   /* Round up so the trailing private area is 16-byte aligned. */
   if (private_size)
      fence_size = ((fence_size + 15) & ~15);

   fence = calloc(1, fence_size + private_size);

   if (!fence) {
      /* OOM: wait the backend fence out so nothing dangles. */
      int ret = mgr->info.finish(mgr, private, fence_type, 0);

      /* NOTE(review): POSIX usleep() requires an argument < 1,000,000;
       * 10000000 (10 s) may fail with EINVAL on some systems.  Consider
       * sleep(10) or nanosleep() -- confirm the intended back-off. */
      if (ret)
         usleep(10000000);

      return NULL;
   }

   _glthread_INIT_MUTEX(fence->mutex);
   _glthread_LOCK_MUTEX(fence->mutex);
   _glthread_LOCK_MUTEX(mgr->mutex);
   fence->refCount = 1;
   /* Creation order on the class list is the assumed signal order. */
   DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
   fence->mgr = mgr;
   /* Each live fence holds a manager reference. */
   ++mgr->refCount;
   ++mgr->num_fences;
   _glthread_UNLOCK_MUTEX(mgr->mutex);
   fence->fence_class = fence_class;
   fence->fence_type = fence_type;
   fence->signaled_type = 0;
   fence->private = private;
   if (private_size) {
      fence->private = (void *)(((uint8_t *) fence) + fence_size);
      memcpy(fence->private, private, private_size);
   }

   _glthread_UNLOCK_MUTEX(fence->mutex);
   return fence;
}
/*
 * TTM backend "signaled" callback: query the kernel for the fence's
 * signaled types.  On error *signaled_type is left at 0.
 */
static int
tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
          uint32_t *signaled_type)
{
   drmFence *kfence = (drmFence *) private;
   int fd = (int) (long) mgr->private;
   int unused_emitted;
   int err;

   *signaled_type = 0;
   err = drmFenceSignaled(fd, kfence, flush_type, &unused_emitted);
   if (err == 0)
      *signaled_type = kfence->signaled;

   return err;
}
/*
 * TTM backend "finish" callback: wait in the kernel for the fence to
 * signal "fence_type", optionally with lazy (low-frequency) polling.
 */
static int
tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
        int lazy_hint)
{
   int fd = (int) (long) mgr->private;
   unsigned waitFlags = 0;

   if (lazy_hint)
      waitFlags |= DRM_FENCE_FLAG_WAIT_LAZY;

   return drmFenceWait(fd, waitFlags, (drmFence *) private, fence_type);
}
/*
 * TTM backend "unreference" callback: drop the kernel fence reference
 * and clear the caller's pointer.
 */
static int
tUnref(struct _DriFenceMgr *mgr, void **private)
{
   int fd = (int) (long) mgr->private;
   drmFence *kfence = (drmFence *) *private;

   *private = NULL;
   return drmFenceUnreference(fd, kfence);
}
struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
{
struct _DriFenceMgrCreateInfo info;
struct _DriFenceMgr *mgr;
info.flags = DRI_FENCE_CLASS_ORDERED;
info.num_classes = 4;
info.signaled = tSignaled;
info.finish = tFinish;
info.unreference = tUnref;
mgr = driFenceMgrCreate(&info);
if (mgr == NULL)
return NULL;
mgr->private = (void *) (long) fd;
return mgr;
}
@@ -0,0 +1,115 @@
#ifndef DRI_FENCEMGR_H
#define DRI_FENCEMGR_H
#include <stdint.h>
#include <stdlib.h>
struct _DriFenceObject;
struct _DriFenceMgr;
/*
* Do a quick check to see if the fence manager has registered the fence
* object as signaled. Note that this function may return a false negative
* answer.
*/
extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
/*
* Check if the fence object is signaled. This function can be substantially
* more expensive to call than the above function, but will not return a false
* negative answer. The argument "flush_type" sets the types that the
* underlying mechanism must make sure will eventually signal.
*/
extern int driFenceSignaledType(struct _DriFenceObject *fence,
uint32_t flush_type, uint32_t *signaled);
/*
* Convenience functions.
*/
/*
 * Return non-zero iff the fence has signaled every type in "flush_type".
 * Treats a backend query error as "not signaled".
 */
static inline int driFenceSignaled(struct _DriFenceObject *fence,
                                   uint32_t flush_type)
{
   uint32_t signaled_types = 0;

   if (driFenceSignaledType(fence, flush_type, &signaled_types) != 0)
      return 0;

   return (signaled_types & flush_type) == flush_type;
}
/*
 * Cheap variant of driFenceSignaled() using only the cached state;
 * may return a false negative, never a false positive.
 */
static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
                                         uint32_t flush_type)
{
   const uint32_t cached = driFenceSignaledTypeCached(fence);

   return (cached & flush_type) == flush_type;
}
/*
* Reference a fence object.
*/
extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
/*
* Unreference a fence object. The fence object pointer will be reset to NULL.
*/
extern void driFenceUnReference(struct _DriFenceObject **pFence);
/*
* Wait for a fence to signal the indicated fence_type.
* If "lazy_hint" is true, it indicates that the wait may sleep to avoid
* busy-wait polling.
*/
extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
int lazy_hint);
/*
* Create a DriFenceObject for manager "mgr".
*
* "private" is a pointer that should be used for the callbacks in
* struct _DriFenceMgrCreateInfo.
*
* if private_size is nonzero, then the info stored at *private, with size
* private size will be copied and the fence manager will instead use a
* pointer to the copied data for the callbacks in
* struct _DriFenceMgrCreateInfo. In that case, the object pointed to by
* "private" may be destroyed after the call to driFenceCreate.
*/
extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
uint32_t fence_class,
uint32_t fence_type,
void *private,
size_t private_size);
extern uint32_t driFenceType(struct _DriFenceObject *fence);
/*
* Fence creations are ordered. If a fence signals a fence_type,
* it is safe to assume that all fences of the same class that was
* created before that fence has signaled the same type.
*/
#define DRI_FENCE_CLASS_ORDERED (1 << 0)
/*
 * Backend description for driFenceMgrCreate(): class count, ordering
 * flags, and the three callbacks every backend must provide.
 */
struct _DriFenceMgrCreateInfo {
   uint32_t flags;         /* DRI_FENCE_CLASS_* flags */
   uint32_t num_classes;   /* number of independent fence classes */
   /* Query currently signaled types, flushing "flush_type" as needed. */
   int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
                    uint32_t *signaled_type);
   /* Block until "fence_type" has signaled. */
   int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
   /* Release the backend fence data; *private is reset by the backend. */
   int (*unreference) (struct _DriFenceMgr *mgr, void **private);
};
extern struct _DriFenceMgr *
driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
extern struct _DriFenceMgr *
driFenceMgrTTMInit(int fd);
#endif
@@ -0,0 +1,162 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <errno.h>
#include "imports.h"
#include "glthread.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "intel_screen.h"
/*
 * Allocate a system-memory buffer: a two-word header (word 0 holds the
 * size; see pool_size/pool_map) followed by the payload.  Only local,
 * cached memory is supported -- any other placement is a programming
 * error and aborts.
 *
 * Fix: the malloc result was dereferenced without a NULL check, crashing
 * on out-of-memory instead of reporting failure to the caller.
 */
static void *
pool_create(struct _DriBufferPool *pool,
            unsigned long size, uint64_t flags, unsigned hint,
            unsigned alignment)
{
   unsigned long *private;

   if ((flags & DRM_BO_MASK_MEM) != DRM_BO_FLAG_MEM_LOCAL)
      abort();

   private = malloc(size + 2 * sizeof(unsigned long));
   if (!private)
      return NULL;

   *private = size;
   return (void *) private;
}
/*
 * Destroy a malloc-pool buffer: just release the header+payload block.
 */
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   free(private);
   return 0;
}
/*
 * System-memory buffers are never busy on the GPU: nothing to wait for.
 */
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private,
              _glthread_Mutex *mutex, int lazy)
{
   return 0;
}
/*
 * "Map" a malloc-pool buffer: the payload starts after the two-word
 * header laid out by pool_create (word 0 = size; word 1 appears unused
 * -- presumably padding for payload alignment, TODO confirm).
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, _glthread_Mutex *mutex, void **virtual)
{
   *virtual = (void *)((unsigned long *)private + 2);
   return 0;
}
/*
 * Unmapping system memory is a no-op.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   return 0;
}
/*
 * System-memory buffers have no aperture offset; asking for one is a
 * programming error.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   /*
    * BUG
    */

   abort();
   return 0UL;
}
/*
 * System-memory buffers have no pool offset; asking for one is a
 * programming error.
 *
 * Fix: added an (unreachable) return value after abort() for parity
 * with pool_offset above and to silence missing-return diagnostics on
 * compilers that do not track abort() as noreturn.
 */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   /*
    * BUG
    */

   abort();
   return 0UL;
}
/*
 * All malloc-pool buffers live in local cached system memory.
 */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
}
/*
 * The buffer's size is recorded in the first header word by pool_create.
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   const unsigned long *header = private;

   return header[0];
}
/*
 * Fencing a system-memory buffer is a programming error: nothing in the
 * GPU pipeline can reference it.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   abort();
   return 0UL;
}
/*
 * Malloc-pool buffers have no kernel buffer object; requesting one is a
 * programming error.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   abort();
   return NULL;
}
/*
 * Destroy the pool object itself.
 */
static void
pool_takedown(struct _DriBufferPool *pool)
{
   free(pool);
}
/*
 * Create a buffer pool backed by plain malloc'ed system memory (local,
 * cached; never validated or fenced).  Returns NULL on failure.
 *
 * Fix: the "reference", "unreference" and "setStatus" vtable members
 * were never initialized, leaving indeterminate function pointers; they
 * are now explicitly NULL (this pool does not support those operations).
 */
struct _DriBufferPool *
driMallocPoolInit(void)
{
   struct _DriBufferPool *pool;

   pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
   if (!pool)
      return NULL;

   pool->data = NULL;
   pool->fd = -1;
   pool->map = &pool_map;
   pool->unmap = &pool_unmap;
   pool->destroy = &pool_destroy;
   pool->offset = &pool_offset;
   pool->poolOffset = &pool_poolOffset;
   pool->flags = &pool_flags;
   pool->size = &pool_size;
   pool->create = &pool_create;
   pool->fence = &pool_fence;
   pool->kernel = &pool_kernel;
   pool->validate = NULL;
   pool->reference = NULL;
   pool->unreference = NULL;
   pool->setStatus = NULL;
   pool->waitIdle = &pool_waitIdle;
   pool->takeDown = &pool_takedown;
   return pool;
}
@@ -0,0 +1,968 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
*/
#include <stdint.h>
#include <sys/time.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
#include "ws_dri_bufmgr.h"
#include "glthread.h"
/* Max attempts at getting a slab with free space before giving up. */
#define DRI_SLABPOOL_ALLOC_RETRIES 100

struct _DriSlab;

/*
 * One buffer handed out by the pool: either a fixed-size slice of a
 * slab, or (isSlabBuffer == 0) a wrapper around a dedicated drmBO for
 * requests larger than any bucket.
 */
struct _DriSlabBuffer {
   int isSlabBuffer;
   drmBO *bo;                       /* dedicated BO; unused for slab slices */
   struct _DriFenceObject *fence;   /* last fence covering GPU access, or NULL */
   struct _DriSlab *parent;         /* owning slab (slab slices only) */
   drmMMListHead head;              /* entry in freeBuffers / delayedBuffers */
   uint32_t mapCount;               /* outstanding CPU mappings */
   uint32_t start;                  /* byte offset within the slab's kernel BO */
   uint32_t fenceType;
   int unFenced;                    /* validated, fence not yet attached */
   _glthread_Cond event;            /* signaled on last unmap and on fencing */
};

/*
 * A kernel buffer object backing one slab; recyclable through the
 * free-slab manager.
 */
struct _DriKernelBO {
   int fd;                     /* DRM file descriptor owning the BO */
   drmBO bo;
   drmMMListHead timeoutHead;  /* entry in fMan->timeoutList (expiry order) */
   drmMMListHead head;         /* entry in fMan->cached / unCached */
   struct timeval timeFreed;   /* when this BO expires if not reused */
   uint32_t pageAlignment;
   void *virtual;              /* CPU mapping set up once in driAllocKernelBO */
};

/* A kernel BO carved into equally-sized buffers of one bucket size. */
struct _DriSlab{
   drmMMListHead head;         /* entry in header->slabs / header->freeSlabs */
   drmMMListHead freeBuffers;
   uint32_t numBuffers;
   uint32_t numFree;
   struct _DriSlabBuffer *buffers;
   struct _DriSlabSizeHeader *header;
   struct _DriKernelBO *kbo;
};

/* Per-bucket state: all slabs of one buffer size. */
struct _DriSlabSizeHeader {
   drmMMListHead slabs;           /* slabs with at least one free buffer */
   drmMMListHead freeSlabs;       /* completely free slabs */
   drmMMListHead delayedBuffers;  /* freed buffers whose fence has not signaled */
   uint32_t numDelayed;
   struct _DriSlabPool *slabPool;
   uint32_t bufSize;
   _glthread_Mutex mutex;         /* protects all fields above */
};

/*
 * Caches recently-freed kernel BOs for a grace period so new slabs can
 * reuse them. May be shared between several slab pools.
 */
struct _DriFreeSlabManager {
   struct timeval slabTimeout;    /* how long a freed BO is kept around */
   struct timeval checkInterval;  /* minimum interval between expiry scans */
   struct timeval nextCheck;
   drmMMListHead timeoutList;     /* all cached BOs, oldest first */
   drmMMListHead unCached;
   drmMMListHead cached;
   _glthread_Mutex mutex;
};

struct _DriSlabPool {
   /*
    * The data of this structure remains constant after
    * initialization and thus needs no mutex protection.
    */
   struct _DriFreeSlabManager *fMan;
   uint64_t proposedFlags;
   uint64_t validMask;
   uint32_t *bucketSizes;      /* ascending powers of two: smallestSize << i */
   uint32_t numBuckets;
   uint32_t pageSize;
   int fd;
   int pageAlignment;
   int maxSlabSize;
   int desiredNumBuffers;      /* target number of buffers per slab */
   struct _DriSlabSizeHeader *headers;  /* one per bucket */
};
/*
* FIXME: Perhaps arrange timeout slabs in size buckets for fast
* retreival??
*/
/*
 * Return nonzero iff *arg1 is at or after *arg2.
 *
 * Fix over the original: the microsecond comparison used '>', so two
 * identical timevals compared as "not after-or-equal", contradicting
 * the function's name. The comparison now uses '>=' as the name
 * promises; callers treat the boundary case as "expired", which is the
 * safe direction for timeout checks.
 */
static inline int
driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
{
   return ((arg1->tv_sec > arg2->tv_sec) ||
           ((arg1->tv_sec == arg2->tv_sec) &&
            (arg1->tv_usec >= arg2->tv_usec)));
}
/*
 * In-place *arg += *add, keeping tv_usec normalized into [0, 1000000).
 * Both operands are assumed to be normalized already.
 */
static inline void
driTimeAdd(struct timeval *arg, struct timeval *add)
{
   unsigned long usec = (unsigned long) arg->tv_usec +
      (unsigned long) add->tv_usec;

   arg->tv_sec += add->tv_sec + (long) (usec / 1000000);
   arg->tv_usec = (long) (usec % 1000000);
}
/*
 * Drop our reference on a kernel BO wrapper and free the wrapper.
 * NULL is a permitted no-op.
 */
static void
driFreeKernelBO(struct _DriKernelBO *kbo)
{
   if (kbo == NULL)
      return;

   (void) drmBOUnreference(kbo->fd, &kbo->bo);
   free(kbo);
}
/*
 * Really free all cached kernel BOs whose grace period has expired at
 * *time. Called with fMan->mutex held.
 */
static void
driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
                         struct timeval *time)
{
   drmMMListHead *list, *next;
   struct _DriKernelBO *kbo;

   /* Rate-limit the scan: do nothing until the next scheduled check. */
   if (!driTimeAfterEq(time, &fMan->nextCheck))
      return;

   /* timeoutList is kept in expiry order (tail insertion in
    * driSetKernelBOFree), so stop at the first unexpired entry. */
   for (list = fMan->timeoutList.next, next = list->next;
        list != &fMan->timeoutList;
        list = next, next = list->next) {

      kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);

      if (!driTimeAfterEq(time, &kbo->timeFreed))
         break;

      /* Unlink from both the timeout list and the cached/unCached list. */
      DRMLISTDELINIT(&kbo->timeoutHead);
      DRMLISTDELINIT(&kbo->head);
      driFreeKernelBO(kbo);
   }

   fMan->nextCheck = *time;
   driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
}
/*
 * Add a _DriKernelBO to the free slab manager.
 * This means that it is available for reuse, but if it's not
 * reused in a while, it will be freed.
 */
static void
driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
                   struct _DriKernelBO *kbo)
{
   struct timeval time;

   _glthread_LOCK_MUTEX(fMan->mutex);
   gettimeofday(&time, NULL);
   driTimeAdd(&time, &fMan->slabTimeout);

   /* Record when this BO expires if nobody reuses it. */
   kbo->timeFreed = time;

   /* File by caching attribute for quick matching in driAllocKernelBO. */
   if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
      DRMLISTADD(&kbo->head, &fMan->cached);
   else
      DRMLISTADD(&kbo->head, &fMan->unCached);

   /* Tail insertion keeps timeoutList sorted by expiry time. */
   DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);

   /* Opportunistically reap anything that has already expired. */
   driFreeTimeoutKBOsLocked(fMan, &time);

   _glthread_UNLOCK_MUTEX(fMan->mutex);
}
/*
 * Get a _DriKernelBO for us to use as storage for a slab.
 *
 * First tries to recycle a size/alignment-compatible BO from the
 * free-slab manager (preferring an exact flag match, otherwise
 * converting flags with drmBOSetStatus); falls back to creating a
 * fresh BO.
 *
 * Fix over the original: the "goto retry" path re-entered the
 * free-list search WITHOUT re-acquiring fMan->mutex (it had been
 * released just above), and on a second hit would then unlock the
 * mutex a second time. The lock acquisition now lives inside the
 * retry loop so every iteration searches under the mutex exactly once.
 */
static struct _DriKernelBO *
driAllocKernelBO(struct _DriSlabSizeHeader *header)
{
   struct _DriSlabPool *slabPool = header->slabPool;
   struct _DriFreeSlabManager *fMan = slabPool->fMan;
   drmMMListHead *list, *next, *head;
   uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
   struct _DriKernelBO *kbo;
   struct _DriKernelBO *kboTmp;
   int ret;

   /*
    * FIXME: We should perhaps allow some variation in slabsize in order
    * to efficiently reuse slabs.
    */

   /* Cap at maxSlabSize and round up to a whole number of pages. */
   size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
   size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);

retry:
   kbo = NULL;
   _glthread_LOCK_MUTEX(fMan->mutex);

   head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
      &fMan->cached : &fMan->unCached;

   for (list = head->next, next = list->next;
        list != head;
        list = next, next = list->next) {
      kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head);

      if ((kboTmp->bo.size == size) &&
          (slabPool->pageAlignment == 0 ||
           (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {

         /* Remember the first size/alignment match ... */
         if (!kbo)
            kbo = kboTmp;

         /* ... but keep looking for an exact flag match, which needs
          * no conversion ioctl below. */
         if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0)
            break;
      }
   }

   if (kbo) {
      /* Claim the candidate: unlink from both manager lists. */
      DRMLISTDELINIT(&kbo->head);
      DRMLISTDELINIT(&kbo->timeoutHead);
   }

   _glthread_UNLOCK_MUTEX(fMan->mutex);

   if (kbo) {
      uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;

      ret = 0;
      if (new_mask) {
         ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
                              new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0);
      }
      if (ret == 0)
         return kbo;

      /* Flag conversion failed: discard this candidate and search again. */
      driFreeKernelBO(kbo);
      goto retry;
   }

   /* Nothing reusable: create a fresh kernel BO. */
   kbo = calloc(1, sizeof(struct _DriKernelBO));
   if (!kbo)
      return NULL;

   kbo->fd = slabPool->fd;
   DRMINITLISTHEAD(&kbo->head);
   DRMINITLISTHEAD(&kbo->timeoutHead);
   ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
                     slabPool->proposedFlags,
                     DRM_BO_HINT_DONT_FENCE, &kbo->bo);
   if (ret)
      goto out_err0;

   /* Map once (and unmap) so kbo->virtual stays set up for the
    * lifetime of the BO; pool_map then computes pointers without
    * further ioctls. NOTE(review): relies on drmBOMap keeping
    * bo.virtual valid after drmBOUnmap — confirm against libdrm. */
   ret = drmBOMap(kbo->fd, &kbo->bo,
                  DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                  0, &kbo->virtual);
   if (ret)
      goto out_err1;

   ret = drmBOUnmap(kbo->fd, &kbo->bo);
   if (ret)
      goto out_err1;

   return kbo;

out_err1:
   drmBOUnreference(kbo->fd, &kbo->bo);
out_err0:
   free(kbo);
   return NULL;
}
/*
 * Allocate a new slab (kernel BO plus per-buffer bookkeeping) and put
 * it on the header's list of slabs with free space. Called with
 * header->mutex held. Returns 0 or -ENOMEM.
 */
static int
driAllocSlab(struct _DriSlabSizeHeader *header)
{
   struct _DriSlab *slab;
   struct _DriSlabBuffer *buf;
   uint32_t numBuffers;
   int ret;
   int i;

   slab = calloc(1, sizeof(*slab));
   if (!slab)
      return -ENOMEM;

   slab->kbo = driAllocKernelBO(header);
   if (!slab->kbo) {
      ret = -ENOMEM;
      goto out_err0;
   }

   /* The kernel BO may be larger than requested; carve up all of it. */
   numBuffers = slab->kbo->bo.size / header->bufSize;

   slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = -ENOMEM;
      goto out_err1;
   }

   DRMINITLISTHEAD(&slab->head);
   DRMINITLISTHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->header = header;

   /* Initialize each slice and chain it on the slab's free list. */
   buf = slab->buffers;
   for (i=0; i < numBuffers; ++i) {
      buf->parent = slab;
      buf->start = i* header->bufSize;
      buf->mapCount = 0;
      buf->isSlabBuffer = 1;
      _glthread_INIT_COND(buf->event);
      DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   DRMLISTADDTAIL(&slab->head, &header->slabs);

   return 0;

out_err1:
   /* Return the kernel BO to the free-slab manager for reuse.
    * (slab->buffers is NULL here, so the free() is a no-op.) */
   driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
   free(slab->buffers);
out_err0:
   free(slab);
   return ret;
}
/*
 * Delete a buffer from the slab header delayed list and put
 * it on the slab free list. Called with header->mutex held.
 */
static void
driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
{
   struct _DriSlab *slab = buf->parent;
   struct _DriSlabSizeHeader *header = slab->header;
   drmMMListHead *list = &buf->head;

   DRMLISTDEL(list);
   DRMLISTADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   /* Slab was full (its head was unlinked and points to itself): it
    * has free space again, so put it back on the header's slab list. */
   if (slab->head.next == &slab->head)
      DRMLISTADDTAIL(&slab->head, &header->slabs);

   /* Slab became entirely free: move it to the free-slab list. */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      DRMLISTDEL(list);
      DRMLISTADDTAIL(list, &header->freeSlabs);
   }

   /* Release the free slabs' kernel BOs back to the free-slab manager.
    * NOTE(review): the condition appears intended to keep one fully
    * free slab cached only while it is the last slab available —
    * confirm against the upstream slab allocator before changing. */
   if (header->slabs.next == &header->slabs ||
       slab->numFree != slab->numBuffers) {

      drmMMListHead *next;
      struct _DriFreeSlabManager *fMan = header->slabPool->fMan;

      for (list = header->freeSlabs.next, next = list->next;
           list != &header->freeSlabs;
           list = next, next = list->next) {

         slab = DRMLISTENTRY(struct _DriSlab, list, head);

         DRMLISTDELINIT(list);
         driSetKernelBOFree(fMan, slab->kbo);
         free(slab->buffers);
         free(slab);
      }
   }
}
/*
 * Scan the delayed-free list and return buffers whose fences have
 * signaled to their slabs' free lists. With 'wait' nonzero, block on
 * the first fence examined. Called with header->mutex held.
 */
static void
driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
{
   drmMMListHead *list, *prev, *first;
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   int firstWasSignaled = 1;
   int signaled;
   int i;
   int ret;

   /*
    * Rerun the freeing test if the youngest tested buffer
    * was signaled, since there might be more idle buffers
    * in the delay list.
    */

   while (firstWasSignaled) {
      firstWasSignaled = 0;
      signaled = 0;
      first = header->delayedBuffers.next;

      /* Only examine the oldest 1/3 of delayed buffers:
       */
      if (header->numDelayed > 3) {
         for (i = 0; i < header->numDelayed; i += 3) {
            first = first->next;
         }
      }

      /* Walk backwards from 'first' toward the list head, i.e. from
       * newer to older entries (buffers are appended at the tail in
       * pool_destroy). */
      for (list = first, prev = list->prev;
           list != &header->delayedBuffers;
           list = prev, prev = list->prev) {
         buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
         slab = buf->parent;

         if (!signaled) {
            if (wait) {
               /* Block once on the newest examined fence; everything
                * older is then polled. */
               ret = driFenceFinish(buf->fence, buf->fenceType, 0);
               if (ret)
                  break;
               signaled = 1;
               wait = 0;
            } else {
               signaled = driFenceSignaled(buf->fence, buf->fenceType);
            }
            if (signaled) {
               if (list == first)
                  firstWasSignaled = 1;
               driFenceUnReference(&buf->fence);
               header->numDelayed--;
               driSlabFreeBufferLocked(buf);
            }
         } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
            /* A newer fence already signaled, so older ones can be
             * checked via the cached signaled status. */
            driFenceUnReference(&buf->fence);
            header->numDelayed--;
            driSlabFreeBufferLocked(buf);
         }
      }
   }
}
/*
 * Take one free buffer from the first slab that has free space,
 * allocating new slabs (with a bounded number of retries) when none is
 * available. Returns NULL if no slab could be allocated.
 *
 * Fix over the original: 'buf' was declared as a *static* local,
 * making the function needlessly non-reentrant and racy across
 * threads. The value is always assigned before use and never carried
 * between calls, so it is a plain automatic variable now.
 */
static struct _DriSlabBuffer *
driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
{
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   drmMMListHead *list;
   int count = DRI_SLABPOOL_ALLOC_RETRIES;

   _glthread_LOCK_MUTEX(header->mutex);
   while(header->slabs.next == &header->slabs && count > 0) {
      /* First try to reclaim delayed (fenced) buffers. */
      driSlabCheckFreeLocked(header, 0);
      if (header->slabs.next != &header->slabs)
         break;

      /* Back off briefly on every retry after the first. */
      _glthread_UNLOCK_MUTEX(header->mutex);
      if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         usleep(1);
      _glthread_LOCK_MUTEX(header->mutex);
      (void) driAllocSlab(header);
      count--;
   }

   list = header->slabs.next;
   if (list == &header->slabs) {
      _glthread_UNLOCK_MUTEX(header->mutex);
      return NULL;
   }
   slab = DRMLISTENTRY(struct _DriSlab, list, head);

   /* Unlink the slab from the partially-free list once it fills up. */
   if (--slab->numFree == 0)
      DRMLISTDELINIT(list);

   list = slab->freeBuffers.next;
   DRMLISTDELINIT(list);

   _glthread_UNLOCK_MUTEX(header->mutex);
   buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
   return buf;
}
/*
 * Allocate a buffer of at least 'size' bytes. Requests that fit one of
 * the pool's buckets come from the slab allocator; larger ones get a
 * dedicated drmBO wrapped in a _DriSlabBuffer. 'alignment' must divide,
 * or be a multiple of, the page size. Returns NULL on failure.
 */
static void *
pool_create(struct _DriBufferPool *driPool, unsigned long size,
            uint64_t flags, unsigned hint, unsigned alignment)
{
   struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
   struct _DriSlabSizeHeader *header;
   struct _DriSlabBuffer *buf;
   void *dummy;
   int i;
   int ret;

   /*
    * FIXME: Check for compatibility.
    */

   /* Buckets are sorted ascending; pick the first one that fits. */
   header = pool->headers;
   for (i=0; i<pool->numBuckets; ++i) {
      if (header->bufSize >= size)
         break;
      header++;
   }

   if (i < pool->numBuckets)
      return driSlabAllocBuffer(header);

   /*
    * Fall back to allocate a buffer object directly from DRM.
    * and wrap it in a driBO structure.
    */

   buf = calloc(1, sizeof(*buf));
   if (!buf)
      return NULL;

   buf->bo = calloc(1, sizeof(*buf->bo));
   if (!buf->bo)
      goto out_err0;

   if (alignment) {
      /* Alignment must be expressible in whole pages. */
      if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
         goto out_err1;
      if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
         goto out_err1;
   }

   ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
                     flags, hint, buf->bo);
   if (ret)
      goto out_err1;

   /* Map once and unmap: presumably primes bo->virtual so pool_map can
    * hand out the pointer without further ioctls — same trick as
    * driAllocKernelBO. TODO confirm against libdrm drmBOMap semantics. */
   ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                  0, &dummy);
   if (ret)
      goto out_err2;

   ret = drmBOUnmap(pool->fd, buf->bo);
   if (ret)
      goto out_err2;

   return buf;

out_err2:
   drmBOUnreference(pool->fd, buf->bo);
out_err1:
   free(buf->bo);
out_err0:
   free(buf);
   return NULL;
}
/*
 * Release a buffer. Non-slab buffers unreference their drmBO directly.
 * Slab buffers carrying an unsignaled fence are parked on the bucket's
 * delayed list (reaped later by driSlabCheckFreeLocked); idle ones go
 * straight back to their slab's free list.
 */
static int
pool_destroy(struct _DriBufferPool *driPool, void *private)
{
   struct _DriSlabBuffer *buf =
      (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
      int ret;

      ret = drmBOUnreference(pool->fd, buf->bo);
      free(buf->bo);
      free(buf);
      return ret;
   }

   slab = buf->parent;
   header = slab->header;

   _glthread_LOCK_MUTEX(header->mutex);
   buf->unFenced = 0;
   buf->mapCount = 0;

   if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
      /* GPU may still be using it: defer the actual free. */
      DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
      header->numDelayed++;
   } else {
      if (buf->fence)
         driFenceUnReference(&buf->fence);
      driSlabFreeBufferLocked(buf);
   }

   _glthread_UNLOCK_MUTEX(header->mutex);
   return 0;
}
/*
 * Block until the buffer is idle: first for any pending validation
 * (unFenced) to complete, then for its fence to signal. The fence
 * reference is dropped once finished. Called with *mutex held; the
 * condition wait releases it while sleeping.
 */
static int
pool_waitIdle(struct _DriBufferPool *driPool, void *private,
              _glthread_Mutex *mutex, int lazy)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   while(buf->unFenced)
      _glthread_COND_WAIT(buf->event, *mutex);

   if (!buf->fence)
      return 0;

   driFenceFinish(buf->fence, buf->fenceType, lazy);
   driFenceUnReference(&buf->fence);

   return 0;
}
/*
 * Map a buffer for CPU access, waiting for the GPU to finish with it
 * first (or returning -EBUSY when DRM_BO_HINT_DONT_BLOCK is set). The
 * pointer is computed from the slab's kernel BO mapping, or from the
 * wrapped drmBO's virtual address for oversize buffers.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, _glthread_Mutex *mutex, void **virtual)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   int busy;

   /* Busy = mid-validation (unFenced) or an unsignaled fence. */
   if (buf->isSlabBuffer)
      busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
   else
      busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);


   if (busy) {
      if (hint & DRM_BO_HINT_DONT_BLOCK)
         return -EBUSY;
      else {
         (void) pool_waitIdle(pool, private, mutex, 0);
      }
   }

   ++buf->mapCount;
   *virtual = (buf->isSlabBuffer) ?
      (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
      (void *) buf->bo->virtual;

   return 0;
}
/*
 * Drop one CPU-mapping reference; wake any thread blocked in
 * pool_validate once the last mapping of a slab buffer goes away.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   (void) pool;
   if (--buf->mapCount == 0 && buf->isSlabBuffer)
      _glthread_COND_BROADCAST(buf->event);

   return 0;
}
/*
 * Return the buffer's GPU offset. Only valid for pools created with
 * DRM_BO_FLAG_NO_MOVE, since the offset is read here without asking
 * the kernel to keep the buffer in place.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
      return buf->bo->offset;
   }

   slab = buf->parent;
   header = slab->header;

   (void) header;  /* referenced only by the assert below */
   assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
   return slab->kbo->bo.offset + buf->start;
}
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
return buf->start;
}
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
if (!buf->isSlabBuffer)
return buf->bo->flags;
return buf->parent->kbo->bo.flags;
}
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
if (!buf->isSlabBuffer)
return buf->bo->size;
return buf->parent->header->bufSize;
}
/*
 * Attach a fence after command submission, replacing any previous
 * fence, and wake threads blocked in pool_waitIdle / pool_validate now
 * that the unFenced phase is over.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   drmBO *bo;

   if (buf->fence)
      driFenceUnReference(&buf->fence);

   buf->fence = driFenceReference(fence);
   /* Remember the fence type the kernel assigned to the backing BO. */
   bo = (buf->isSlabBuffer) ?
      &buf->parent->kbo->bo:
      buf->bo;
   buf->fenceType = bo->fenceFlags;

   buf->unFenced = 0;
   _glthread_COND_BROADCAST(buf->event);

   return 0;
}
/*
 * Return the kernel buffer object backing this buffer (shared slab BO
 * for slab slices, the dedicated BO otherwise).
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = private;

   (void) pool;
   if (buf->isSlabBuffer)
      return &buf->parent->kbo->bo;
   return buf->bo;
}
/*
 * Prepare a slab buffer for validation: wait until no CPU mappings
 * remain, then mark it unFenced until pool_fence is called. Called
 * with *mutex held; the condition wait releases it while sleeping.
 * No-op for non-slab buffers.
 */
static int
pool_validate(struct _DriBufferPool *pool, void *private,
              _glthread_Mutex *mutex)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (!buf->isSlabBuffer)
      return 0;

   while(buf->mapCount != 0)
      _glthread_COND_WAIT(buf->event, *mutex);

   buf->unFenced = 1;
   return 0;
}
/*
 * Create a free-slab manager, which caches freed slab kernel BOs for
 * 'slabTimeoutMsec' before really freeing them, scanning for expired
 * entries at most every 'checkIntervalMsec'. Returns NULL on OOM.
 */
struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
{
   struct _DriFreeSlabManager *tmp;

   tmp = calloc(1, sizeof(*tmp));
   if (!tmp)
      return NULL;

   _glthread_INIT_MUTEX(tmp->mutex);
   _glthread_LOCK_MUTEX(tmp->mutex);

   /* Split each msec value into normalized timeval sec/usec parts. */
   tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
   tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
   tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;

   tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
   tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
   tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;

   gettimeofday(&tmp->nextCheck, NULL);
   driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
   DRMINITLISTHEAD(&tmp->timeoutList);
   DRMINITLISTHEAD(&tmp->unCached);
   DRMINITLISTHEAD(&tmp->cached);
   _glthread_UNLOCK_MUTEX(tmp->mutex);

   return tmp;
}
/*
 * Destroy a free-slab manager. All pools using it must have been taken
 * down first; the asserts verify that the forced expiry pass below
 * really emptied every list.
 */
void
driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
{
   struct timeval time;

   /* Pass a time one checkInterval past nextCheck so the scan runs and
    * (presumably) outlives every pending timeFreed — TODO confirm this
    * always exceeds the newest entry's expiry. */
   time = fMan->nextCheck;
   driTimeAdd(&time, &fMan->checkInterval);

   _glthread_LOCK_MUTEX(fMan->mutex);
   driFreeTimeoutKBOsLocked(fMan, &time);
   _glthread_UNLOCK_MUTEX(fMan->mutex);

   assert(fMan->timeoutList.next == &fMan->timeoutList);
   assert(fMan->unCached.next == &fMan->unCached);
   assert(fMan->cached.next == &fMan->cached);

   free(fMan);
}
/*
 * Initialize one bucket header for buffers of 'size' bytes: empty slab
 * lists, empty delayed list, and a fresh mutex.
 */
static void
driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
                  struct _DriSlabSizeHeader *header)
{
   _glthread_INIT_MUTEX(header->mutex);
   _glthread_LOCK_MUTEX(header->mutex);

   header->numDelayed = 0;
   header->slabPool = pool;
   header->bufSize = size;

   DRMINITLISTHEAD(&header->slabs);
   DRMINITLISTHEAD(&header->freeSlabs);
   DRMINITLISTHEAD(&header->delayedBuffers);

   _glthread_UNLOCK_MUTEX(header->mutex);
}
/*
 * Drain one bucket at pool takedown: synchronously finish the fence of
 * every delayed buffer and return it to its slab (which in turn hands
 * fully-free slabs back to the free-slab manager).
 */
static void
driFinishSizeHeader(struct _DriSlabSizeHeader *header)
{
   drmMMListHead *list, *next;
   struct _DriSlabBuffer *buf;

   _glthread_LOCK_MUTEX(header->mutex);
   for (list = header->delayedBuffers.next, next = list->next;
        list != &header->delayedBuffers;
        list = next, next = list->next) {

      buf = DRMLISTENTRY(struct _DriSlabBuffer, list , head);
      if (buf->fence) {
         /* Block until the GPU is done before freeing. */
         (void) driFenceFinish(buf->fence, buf->fenceType, 0);
         driFenceUnReference(&buf->fence);
      }
      header->numDelayed--;
      driSlabFreeBufferLocked(buf);
   }
   _glthread_UNLOCK_MUTEX(header->mutex);
}
/*
 * Tear down the whole slab pool: flush each bucket's delayed buffers,
 * then release all pool bookkeeping storage. The shared free-slab
 * manager is NOT destroyed here.
 */
static void
pool_takedown(struct _DriBufferPool *driPool)
{
   struct _DriSlabPool *pool = driPool->data;
   uint32_t i;

   for (i = 0; i < pool->numBuckets; i++)
      driFinishSizeHeader(&pool->headers[i]);

   free(pool->headers);
   free(pool->bucketSizes);
   free(pool);
   free(driPool);
}
/*
 * Create a slab buffer pool with 'numSizes' buckets of power-of-two
 * sizes starting at 'smallestSize'. Requests larger than the biggest
 * bucket fall back to dedicated drmBOs (see pool_create).
 *
 * flags/validMask:     proposed placement flags for slab kernel BOs.
 * desiredNumBuffers:   target buffers per slab (capped by maxSlabSize).
 * pageAlignment:       required BO alignment, in pages.
 * fMan:                free-slab manager; may be shared between pools.
 *
 * Returns the pool, or NULL on allocation failure.
 */
struct _DriBufferPool *
driSlabPoolInit(int fd, uint64_t flags,
                uint64_t validMask,
                uint32_t smallestSize,
                uint32_t numSizes,
                uint32_t desiredNumBuffers,
                uint32_t maxSlabSize,
                uint32_t pageAlignment,
                struct _DriFreeSlabManager *fMan)
{
   struct _DriBufferPool *driPool;
   struct _DriSlabPool *pool;
   uint32_t i;

   driPool = calloc(1, sizeof(*driPool));
   if (!driPool)
      return NULL;

   pool = calloc(1, sizeof(*pool));
   if (!pool)
      goto out_err0;

   pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
   if (!pool->bucketSizes)
      goto out_err1;

   pool->headers = calloc(numSizes, sizeof(*pool->headers));
   if (!pool->headers)
      goto out_err2;

   pool->fMan = fMan;
   pool->proposedFlags = flags;
   pool->validMask = validMask;
   pool->numBuckets = numSizes;
   pool->pageSize = getpagesize();
   pool->fd = fd;
   pool->pageAlignment = pageAlignment;
   pool->maxSlabSize = maxSlabSize;
   pool->desiredNumBuffers = desiredNumBuffers;

   /* Bucket i serves buffers of size smallestSize << i. */
   for (i=0; i<pool->numBuckets; ++i) {
      pool->bucketSizes[i] = (smallestSize << i);
      driInitSizeHeader(pool, pool->bucketSizes[i],
                        &pool->headers[i]);
   }

   driPool->data = (void *) pool;
   driPool->map = &pool_map;
   driPool->unmap = &pool_unmap;
   driPool->destroy = &pool_destroy;
   driPool->offset = &pool_offset;
   driPool->poolOffset = &pool_poolOffset;
   driPool->flags = &pool_flags;
   driPool->size = &pool_size;
   driPool->create = &pool_create;
   driPool->fence = &pool_fence;
   driPool->kernel = &pool_kernel;
   driPool->validate = &pool_validate;
   driPool->waitIdle = &pool_waitIdle;
   driPool->takeDown = &pool_takedown;

   return driPool;

out_err2:
   free(pool->bucketSizes);
out_err1:
   free(pool);
out_err0:
   free(driPool);

   return NULL;
}
+38
View File
@@ -0,0 +1,38 @@
# src/mesa/drivers/egl_drm/Makefile
# Recursive build driver: iterates over the driver subdirectories named
# in DRI_DIRS (provided by the config included via configs/current).

TOP = ../../../..
include $(TOP)/configs/current

default: $(TOP)/$(LIB_DIR) subdirs

# Make sure the top-level lib directory exists before building into it.
$(TOP)/$(LIB_DIR):
	-mkdir $(TOP)/$(LIB_DIR)

# Build each existing driver subdirectory; stop on the first failure.
subdirs:
	@for dir in $(DRI_DIRS) ; do \
		if [ -d $$dir ] ; then \
			(cd $$dir && $(MAKE)) || exit 1 ; \
		fi \
	done

install:
	@for dir in $(DRI_DIRS) ; do \
		if [ -d $$dir ] ; then \
			(cd $$dir && $(MAKE) install) || exit 1 ; \
		fi \
	done

# Clean keeps going past failures (no exit 1), unlike build/install.
clean:
	@for dir in $(DRI_DIRS) ; do \
		if [ -d $$dir ] ; then \
			(cd $$dir && $(MAKE) clean) ; \
		fi \
	done
	-rm -f common/*.o
@@ -0,0 +1,116 @@
# -*-makefile-*-
# Shared build template for the egl_drm winsys drivers. Including
# makefiles must define LIBNAME, C_SOURCES, ASM_SOURCES, DRIVER_DEFINES
# and PIPE_DRIVERS before including this file.

MESA_MODULES = \
	$(TOP)/src/mesa/libmesa.a \
	$(GALLIUM_AUXILIARIES)

COMMON_GALLIUM_SOURCES = \
	$(TOP)/src/mesa/drivers/dri/common/utils.c \
	$(TOP)/src/mesa/drivers/dri/common/vblank.c \
	$(TOP)/src/mesa/drivers/dri/common/dri_util.c \
	$(TOP)/src/mesa/drivers/dri/common/xmlconfig.c

COMMON_SOURCES = $(COMMON_GALLIUM_SOURCES) \
	$(TOP)/src/mesa/drivers/common/driverfuncs.c \
	$(TOP)/src/mesa/drivers/dri/common/texmem.c \
	$(TOP)/src/mesa/drivers/dri/common/drirenderbuffer.c

COMMON_BM_SOURCES = \
	$(TOP)/src/mesa/drivers/dri/common/dri_bufmgr.c \
	$(TOP)/src/mesa/drivers/dri/common/dri_drmpool.c

# Two window-system flavours: plain dri, or miniglx (embedded).
ifeq ($(WINDOW_SYSTEM),dri)
WINOBJ=
WINLIB=
INCLUDES = $(SHARED_INCLUDES) $(EXPAT_INCLUDES)

OBJECTS = \
	$(C_SOURCES:.c=.o) \
	$(ASM_SOURCES:.S=.o)

else
# miniglx
WINOBJ=
WINLIB=-L$(MESA)/src/glx/mini
MINIGLX_INCLUDES = -I$(TOP)/src/glx/mini
INCLUDES = $(MINIGLX_INCLUDES) \
	$(SHARED_INCLUDES) \
	$(PCIACCESS_CFLAGS)

OBJECTS = $(C_SOURCES:.c=.o) \
	$(MINIGLX_SOURCES:.c=.o) \
	$(ASM_SOURCES:.S=.o)
endif

### Include directories
SHARED_INCLUDES = \
	-I. \
	-I$(TOP)/src/mesa/drivers/dri/common \
	-Iserver \
	-I$(TOP)/include \
	-I$(TOP)/include/GL/internal \
	-I$(TOP)/src/gallium/include \
	-I$(TOP)/src/gallium/auxiliary \
	-I$(TOP)/src/gallium/drivers \
	-I$(TOP)/src/mesa \
	-I$(TOP)/src/mesa/main \
	-I$(TOP)/src/mesa/glapi \
	-I$(TOP)/src/mesa/math \
	-I$(TOP)/src/mesa/transform \
	-I$(TOP)/src/mesa/shader \
	-I$(TOP)/src/mesa/swrast \
	-I$(TOP)/src/mesa/swrast_setup \
	-I$(TOP)/src/egl/main \
	-I$(TOP)/src/egl/drivers/dri \
	$(LIBDRM_CFLAGS)

##### RULES #####

.c.o:
	$(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@

.S.o:
	$(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@

##### TARGETS #####

default: depend symlinks $(LIBNAME) $(TOP)/$(LIB_DIR)/$(LIBNAME)

$(LIBNAME): $(OBJECTS) $(MESA_MODULES) $(PIPE_DRIVERS) $(WINOBJ) Makefile $(TOP)/src/mesa/drivers/dri/Makefile.template
	$(TOP)/bin/mklib -noprefix -o $@ \
		$(OBJECTS) $(PIPE_DRIVERS) $(MESA_MODULES) $(WINOBJ) $(DRI_LIB_DEPS)

$(TOP)/$(LIB_DIR)/$(LIBNAME): $(LIBNAME)
	$(INSTALL) $(LIBNAME) $(TOP)/$(LIB_DIR)

# Dependency errors are discarded: generated headers may not exist yet.
depend: $(C_SOURCES) $(ASM_SOURCES) $(SYMLINKS)
	rm -f depend
	touch depend
	$(MKDEP) $(MKDEP_OPTIONS) $(DRIVER_DEFINES) $(INCLUDES) $(C_SOURCES) \
		$(ASM_SOURCES) 2> /dev/null

# Emacs tags
tags:
	etags `find . -name \*.[ch]` `find ../include`

# Remove .o and backup files
clean:
	-rm -f *.o */*.o *~ *.so *~ server/*.o $(SYMLINKS)
	-rm -f depend depend.bak

install: $(LIBNAME)
	$(INSTALL) -d $(DRI_DRIVER_INSTALL_DIR)
	$(INSTALL) -m 755 $(LIBNAME) $(DRI_DRIVER_INSTALL_DIR)

include depend
+40
View File
@@ -0,0 +1,40 @@
# Build EGL_i915.so: the gallium i915 EGL winsys driver, linking both
# the i915simple hardware driver and softpipe as a fallback.
TOP = ../../../../..
include $(TOP)/configs/current

LIBNAME = EGL_i915.so

PIPE_DRIVERS = \
	$(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
	$(TOP)/src/gallium/drivers/i915simple/libi915simple.a

DRIVER_SOURCES = \
	intel_winsys_pipe.c \
	intel_winsys_softpipe.c \
	intel_winsys_i915.c \
	intel_batchbuffer.c \
	intel_swapbuffers.c \
	intel_context.c \
	intel_lock.c \
	intel_screen.c \
	ws_dri_bufmgr.c \
	ws_dri_drmpool.c \
	ws_dri_fencemgr.c \
	ws_dri_mallocpool.c \
	ws_dri_slabpool.c \
	intel_egl.c

C_SOURCES = \
	$(COMMON_GALLIUM_SOURCES) \
	$(DRIVER_SOURCES)

ASM_SOURCES = 

# DRM_VBLANK_FLIP is only available with libdrm >= 2.3.1; probe for it.
DRIVER_DEFINES = -I$(TOP)/src/mesa/drivers/dri/intel $(shell pkg-config libdrm --atleast-version=2.3.1 \
		&& echo "-DDRM_VBLANK_FLIP=DRM_VBLANK_FLIP")

include ../Makefile.template

intel_tex_layout.o: $(TOP)/src/mesa/drivers/dri/intel/intel_tex_layout.c

# No symlinked sources are needed for this driver.
symlinks:
@@ -0,0 +1,39 @@
# SCons build for the gallium i915 DRI driver (i915tex_dri.so).
Import('*')

env = drienv.Clone()

env.Append(CPPPATH = [
    '../intel',
    'server'
])

#MINIGLX_SOURCES = server/intel_dri.c

# NOTE(review): this lists intel_batchpool.c while the corresponding
# Makefile builds the ws_dri_* buffer-manager sources instead — confirm
# which source set the scons build actually needs.
DRIVER_SOURCES = [
    'intel_winsys_pipe.c',
    'intel_winsys_softpipe.c',
    'intel_winsys_i915.c',
    'intel_batchbuffer.c',
    'intel_swapbuffers.c',
    'intel_context.c',
    'intel_lock.c',
    'intel_screen.c',
    'intel_batchpool.c',
]

sources = \
    COMMON_GALLIUM_SOURCES + \
    COMMON_BM_SOURCES + \
    DRIVER_SOURCES

# Pipe drivers linked into the final shared library.
drivers = [
    softpipe,
    i915simple
]

# TODO: write a wrapper function http://www.scons.org/wiki/WrapperFunctions
env.SharedLibrary(
    target ='i915tex_dri.so',
    source = sources,
    LIBS = drivers + mesa + auxiliaries + env['LIBS'],
)
@@ -0,0 +1,465 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_egl.h"

#include <errno.h>
#include <stdlib.h>
/* Debug helper (currently compiled out): hex-dump 'count' bytes of a
 * batchbuffer, four dwords per line, prefixed with their offset. */
#if 0
static void
intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
{
   int i;
   fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
   for (i = 0; i < count / 4; i += 4)
      fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
              offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
   fprintf(stderr, "END BATCH\n\n\n");
}
#endif
/*
 * Grow (or initially allocate) the relocation table so it can hold
 * num_relocs relocation records plus the fixed header.
 *
 * Fix over the original: the realloc() return value was stored into
 * batch->reloc unchecked, so an allocation failure both leaked the old
 * table and left a NULL pointer that callers (intel_offset_relocation)
 * dereference without checking. On OOM we now fail hard instead of
 * silently corrupting state.
 */
static void
intel_realloc_relocs(struct intel_batchbuffer *batch, int num_relocs)
{
   unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
   uint32_t *reloc;

   size *= sizeof(uint32_t);

   reloc = realloc(batch->reloc, size);
   if (reloc == NULL)
      abort();   /* callers assume the table is always valid */

   batch->reloc = reloc;
   batch->reloc_size = num_relocs;
}
/*
 * Reset the batchbuffer for a new batch: drop the previous validate
 * list references, reallocate the backing BO data, re-add the batch BO
 * to the validate list, reset the relocation table and map the buffer
 * for dword emission.
 */
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   /*
    * Get a new, free batchbuffer.
    */
   drmBO *bo;
   struct drm_bo_info_req *req;
   int ret;

   driBOUnrefUserList(batch->list);
   driBOResetList(batch->list);

   batch->size = 4096; // ZZZ JB batch->intel->intelScreen->maxBatchSize;
   ret = driBOData(batch->buffer, batch->size, NULL, NULL, 0);
   assert(!ret);

   /*
    * Add the batchbuffer to the validate list.
    */

   driBOAddListItem(batch->list, batch->buffer,
                    DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
                    DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
                    &batch->dest_location, &batch->node);

   req = &batch->node->bo_arg.d.req.bo_req;

   /*
    * Set up information needed for us to make relocations
    * relative to the underlying drm buffer objects.
    */

   driReadLockKernelBO();
   bo = driBOKernel(batch->buffer);
   req->presumed_offset = (uint64_t) bo->offset;
   req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   batch->drmBOVirtual = (uint8_t *) bo->virtual;
   driReadUnlockKernelBO();

   /*
    * Adjust the relocation buffer size.
    */

   if (batch->reloc_size > INTEL_MAX_RELOCS ||
       batch->reloc == NULL)
      intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);

   assert(batch->reloc != NULL);
   /* Relocation stream header consumed by the kernel execbuffer path. */
   batch->reloc[0] = 0; /* No relocs yet. */
   batch->reloc[1] = 1; /* Reloc type 1 */
   batch->reloc[2] = 0; /* Only a single relocation list. */
   batch->reloc[3] = 0; /* Only a single relocation list. */

   batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
   batch->poolOffset = driBOPoolOffset(batch->buffer);
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->nr_relocs = 0;
   batch->flags = 0;
   batch->id = 0;//batch->intel->intelScreen->batch_id++;
}
/*======================================================================
* Public functions
*/
/*
 * Allocate and initialize a batchbuffer for the given screen.
 * Returns NULL on allocation failure.
 *
 * Fixes over the original: the calloc() result was used unchecked (a
 * NULL would have been dereferenced on the very next line), and the
 * arguments were in the unconventional calloc(size, 1) order.
 */
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_screen *intel_screen)
{
   struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

   if (batch == NULL)
      return NULL;

   batch->intel_screen = intel_screen;

   driGenBuffers(intel_screen->batchPool, "batchbuffer", 1,
                 &batch->buffer, 4096,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
   batch->last_fence = NULL;
   batch->list = driBOCreateList(20);
   batch->reloc = NULL;
   intel_batchbuffer_reset(batch);
   return batch;
}
/*
 * Tear down a batchbuffer: wait for the last submission to complete,
 * drop any CPU mapping, then release the buffer object, the validate
 * list, the relocation table and the batch struct itself.
 */
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->last_fence) {
      driFenceFinish(batch->last_fence, DRM_FENCE_TYPE_EXE, GL_FALSE);
      driFenceUnReference(&batch->last_fence);
   }

   if (batch->map) {
      driBOUnmap(batch->buffer);
      batch->map = NULL;
   }

   driBOUnReference(batch->buffer);
   batch->buffer = NULL;
   driBOFreeList(batch->list);
   free(batch->reloc);   /* free(NULL) is a no-op */
   free(batch);
}
/*
 * Emit a relocation: add driBO to the batch's validate list, write the
 * buffer's presumed offset (+ pre_add) into the batch stream as the
 * next dword, and record a relocation entry so the kernel can patch
 * that dword if the buffer moves at validation time.
 */
void
intel_offset_relocation(struct intel_batchbuffer *batch,
                        unsigned pre_add,
                        struct _DriBufferObject *driBO,
                        uint64_t val_flags,
                        uint64_t val_mask)
{
   int itemLoc;
   struct _drmBONode *node;
   uint32_t *reloc;
   struct drm_bo_info_req *req;

   driBOAddListItem(batch->list, driBO, val_flags, val_mask,
                    &itemLoc, &node);
   req = &node->bo_arg.d.req.bo_req;

   if (!(req->hint &  DRM_BO_HINT_PRESUMED_OFFSET)) {

      /*
       * Stop other threads from tampering with the underlying
       * drmBO while we're reading its offset.
       */

      driReadLockKernelBO();
      req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
      driReadUnlockKernelBO();
      req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   }

   /* Relocate relative to the target BO's pool start. */
   pre_add += driBOPoolOffset(driBO);

   /* Grow the reloc table geometrically when it fills up. */
   if (batch->nr_relocs == batch->reloc_size)
      intel_realloc_relocs(batch, batch->reloc_size * 2);

   reloc = batch->reloc +
      (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);

   /* reloc record: [0] offset in batch, [1] delta, [2] target index,
    * [3] batch buffer index. */
   reloc[0] = ((uint8_t *)batch->ptr - batch->drmBOVirtual);
   intel_batchbuffer_emit_dword(batch, req->presumed_offset + pre_add);
   reloc[1] = pre_add;
   reloc[2] = itemLoc;
   reloc[3] = batch->dest_location;
   batch->nr_relocs++;
}
/*
 * Mirror the kernel's buffer-object info reply into the userspace
 * drmBO structure, field by field.
 */
static void
i915_drm_copy_reply(const struct drm_bo_info_rep *rep, drmBO *buf)
{
   buf->handle = rep->handle;
   buf->flags = rep->flags;
   buf->size = rep->size;
   buf->offset = rep->offset;
   buf->start = rep->buffer_start;
   buf->mapHandle = rep->arg_handle;
   buf->proposedFlags = rep->proposed_flags;
   buf->fenceFlags = rep->fence_flags;
   buf->replyFlags = rep->rep_flags;
   buf->pageAlignment = rep->page_alignment;
}
/*
 * Submit the batch with the DRM_I915_EXECBUFFER ioctl: chain every
 * buffer on the validate list into a linked list of drm_i915_op_arg
 * validate requests, attach the relocation table, fire the ioctl
 * (retrying on EAGAIN), and copy the kernel's per-buffer replies back.
 * Returns 0 or a negative errno.
 */
static int
i915_execbuf(struct intel_batchbuffer *batch,
             GLuint used,
             GLboolean ignore_cliprects,
             drmBOList *list,
             struct drm_i915_execbuffer *ea)
{
   struct intel_screen *intel_screen = batch->intel_screen;
   drmBONode *node;
   drmMMListHead *l;
   struct drm_i915_op_arg *arg, *first;
   struct drm_bo_op_req *req;
   struct drm_bo_info_rep *rep;
   uint64_t *prevNext = NULL;
   drmBO *buf;
   int ret = 0;
   uint32_t count = 0;

   first = NULL;

   /* Build the kernel-visible singly-linked list of validate ops,
    * linking each node through the previous node's 'next' field. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);

      arg = &node->bo_arg;
      req = &arg->d.req;

      if (!first)
         first = arg;

      if (prevNext)
         *prevNext = (unsigned long)arg;

      prevNext = &arg->next;
      req->bo_req.handle = node->buf->handle;
      req->op = drm_bo_validate;
      req->bo_req.flags = node->arg0;
      req->bo_req.mask = node->arg1;
      req->bo_req.hint |= 0;
      count++;
   }

   memset(ea, 0, sizeof(*ea));
   ea->num_buffers = count;
   ea->batch.start = batch->poolOffset;
   ea->batch.used = used;
#if 0 /* ZZZ JB: no cliprects used */
   ea->batch.cliprects = intel->pClipRects;
   ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
                      (((GLuint) intel->drawY) << 16));
#else
   ea->batch.cliprects = NULL;
   ea->batch.num_cliprects = 0;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;
#endif
   ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
   ea->ops_list = (unsigned long) first;
   first->reloc_ptr = (unsigned long) batch->reloc;
   /* First header dword carries the relocation count. */
   batch->reloc[0] = batch->nr_relocs;

   //return -EFAULT;
   do {
      ret = drmCommandWriteRead(intel_screen->device->drmFD, DRM_I915_EXECBUFFER, ea,
                                sizeof(*ea));
   } while (ret == -EAGAIN);

   if (ret != 0) {
      printf("%s somebody set us up the bomb\n", __FUNCTION__);
      return ret;
   }

   /* Propagate the kernel's post-validation state (offset, flags, ...)
    * back into each userspace drmBO. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      arg = &node->bo_arg;
      rep = &arg->d.rep.bo_info;

      if (!arg->handled) {
         return -EFAULT;
      }
      if (arg->d.rep.ret)
         return arg->d.rep.ret;

      buf = node->buf;
      i915_drm_copy_reply(rep, buf);
   }
   return 0;
}
/* TODO: Push this whole function into bufmgr.
 */
/*
 * Validate the batch's buffer list, submit it via i915_execbuf() and
 * wrap the resulting kernel fence in a _DriFenceObject covering the
 * whole list.  Returns the fence (also cached as batch->last_fence
 * when it fences RW access), or NULL if the hardware was idled by the
 * kernel.  allow_unlock is currently unused.
 */
static struct _DriFenceObject *
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used,
                GLboolean ignore_cliprects, GLboolean allow_unlock)
{
   struct intel_screen *intel_screen = batch->intel_screen;
   struct _DriFenceObject *fo;
   drmFence fence;
   drmBOList *boList;
   struct drm_i915_execbuffer ea;
   int ret = 0;

   driBOValidateUserList(batch->list);
   boList = driGetdrmBOList(batch->list);

#if 0 /* ZZZ JB Allways run */
   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
#else
   if (1) {
#endif
      ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
   } else {
      driPutdrmBOList(batch->list);
      fo = NULL;
      goto out;
   }
   driPutdrmBOList(batch->list);

   if (ret)
      abort();

   if (ea.fence_arg.error != 0) {

      /*
       * The hardware has been idled by the kernel.
       * Don't fence the driBOs.
       */

      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);

#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
      _mesa_printf("fence error\n");
#endif
      batch->last_fence = NULL;
      fo = NULL;
      goto out;
   }

   /* Wrap the kernel fence handed back by the execbuffer ioctl. */
   fence.handle = ea.fence_arg.handle;
   fence.fence_class = ea.fence_arg.fence_class;
   fence.type = ea.fence_arg.type;
   fence.flags = ea.fence_arg.flags;
   fence.signaled = ea.fence_arg.signaled;

   fo = driBOFenceUserList(intel_screen->mgr, batch->list,
                           "SuperFence", &fence);

   /* Only remember fences that cover read/write access. */
   if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);
      /*
       * FIXME: Context last fence??
       */
      batch->last_fence = fo;
      driFenceReference(fo);
   }
 out:
#if 0 /* ZZZ JB: fix this */
   intel->vtbl.lost_hardware(intel);
#else
#endif
   return fo;
}
/*
 * Terminate the batch with MI_FLUSH + MI_BATCH_BUFFER_END, submit it to
 * the kernel and return a referenced fence for the submission.  The
 * batch is reset and ready for reuse on return.
 *
 * NOTE(review): when the batch is empty this references and returns
 * batch->last_fence, which can be NULL (do_flush_locked sets it to
 * NULL on a fence error) — possible NULL dereference; confirm whether
 * driFenceReference tolerates NULL.
 */
struct _DriFenceObject *
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
   //struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   //GLboolean was_locked = 1;
   struct _DriFenceObject *fence;

   if (used == 0) {
      driFenceReference(batch->last_fence);
      return batch->last_fence;
   }

   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    */
#if 0 /* ZZZ JB: what should we do here? */
   if (used & 4) {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }
#else
   /* Pad with an extra dword when needed so the batch ends 8-byte aligned. */
   if (used & 4) {
      ((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
      ((int *) batch->ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
      used += 8;
   }
#endif

   driBOUnmap(batch->buffer);
   batch->ptr = NULL;
   batch->map = NULL;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
/* if (!was_locked)
      LOCK_HARDWARE(intel);*/

   fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                           GL_FALSE);

/* if (!was_locked)
      UNLOCK_HARDWARE(intel);*/

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);

   return fence;
}
/* Flush the batchbuffer and block until the resulting fence signals,
 * then drop our reference to it. */
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
   struct _DriFenceObject *fo = intel_batchbuffer_flush(batch);

   driFenceFinish(fo, driFenceType(fo), GL_FALSE);
   driFenceUnReference(&fo);
}
/* Copy a dword-aligned blob of command data into the batch, flushing
 * first if there is not enough room or the cliprect flags conflict. */
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes, GLuint flags)
{
   /* Batch contents are always whole dwords. */
   assert(bytes % 4 == 0);

   intel_batchbuffer_require_space(batch, bytes, flags);

   memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
@@ -0,0 +1,133 @@
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
#include "mtypes.h"
#include "ws_dri_bufmgr.h"
struct intel_screen;
#define BATCH_SZ 16384
#define BATCH_RESERVED 16
#define INTEL_DEFAULT_RELOCS 100
#define INTEL_MAX_RELOCS 400
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
/**
 * A batchbuffer under construction, plus its relocation records and
 * buffer-validation state.
 */
struct intel_batchbuffer
{
   struct bufmgr *bm;                  /* buffer manager -- unused in the visible code, TODO confirm */
   struct intel_screen *intel_screen;  /* screen owning the pools / fence manager */

   struct _DriBufferObject *buffer;    /* underlying DRI buffer object */
   struct _DriFenceObject *last_fence; /* fence of the last RW submission; may be NULL */
   GLuint flags;                       /* accumulated INTEL_BATCH_* cliprect flags */

   struct _DriBufferList *list;        /* validation list of referenced BOs */
   GLuint list_count;

   GLubyte *map;                       /* start of the mapped batch */
   GLubyte *ptr;                       /* current write position within the map */

   uint32_t *reloc;                    /* relocation records; reloc[0] holds the count */
   GLuint reloc_size;                  /* allocated number of relocation slots */
   GLuint nr_relocs;                   /* relocations recorded so far */

   GLuint size;                        /* total batch size in bytes */

   GLuint dirty_state;
   GLuint id;

   uint32_t poolOffset;                /* batch start offset within its pool */
   uint8_t *drmBOVirtual;              /* mapped address of the underlying drmBO */
   struct _drmBONode *node;            /* Validation list node for this buffer */
   int dest_location;                  /* Validation list sequence for this buffer */
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_screen
*intel);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_finish(struct intel_batchbuffer *batch);
struct _DriFenceObject *intel_batchbuffer_flush(struct intel_batchbuffer
*batch);
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
/* Unlike bmBufferData, this currently requires the buffer be mapped.
* Consider it a convenience function wrapping multple
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, GLuint bytes, GLuint flags);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
GLuint bytes);
void
intel_offset_relocation(struct intel_batchbuffer *batch,
unsigned pre_add,
struct _DriBufferObject *driBO,
uint64_t val_flags,
uint64_t val_mask);
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
/* Bytes still available for commands, excluding the reserved tail that
 * is kept free for the closing MI_FLUSH / MI_BATCH_BUFFER_END. */
static INLINE GLuint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
   GLuint used = batch->ptr - batch->map;

   return batch->size - BATCH_RESERVED - used;
}
/* Append one 32-bit command dword.  The caller must already have
 * reserved space (see intel_batchbuffer_require_space). */
static INLINE void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
   GLuint *out = (GLuint *) batch->ptr;

   assert(batch->map);
   assert(intel_batchbuffer_space(batch) >= 4);

   *out = dword;
   batch->ptr += 4;
}
/* Guarantee that sz bytes are available in the batch, flushing it when
 * space runs out or when the requested cliprect flags conflict with
 * those already accumulated. */
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
                                GLuint sz, GLuint flags)
{
   GLboolean flag_conflict;

   assert(sz < batch->size - 8);

   flag_conflict = batch->flags != 0 && flags != 0 && batch->flags != flags;

   if (flag_conflict || intel_batchbuffer_space(batch) < sz) {
      struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
      driFenceUnReference(&fence);
   }

   batch->flags |= flags;
}
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
#define BEGIN_BATCH(n, flags) do { \
assert(!intel->prim.flush); \
intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
#define OUT_RELOC(buf,flags,mask,delta) do { \
assert((delta) >= 0); \
intel_offset_relocation(intel->batch, delta, buf, flags, mask); \
} while (0)
#define ADVANCE_BATCH() do { } while(0)
#endif
@@ -0,0 +1,366 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_swapbuffers.h"
#include "intel_winsys.h"
#include "intel_batchbuffer.h"
#include "state_tracker/st_public.h"
#include "pipe/p_defines.h"
#include "pipe/p_context.h"
#include "intel_egl.h"
#include "utils.h"
#ifdef DEBUG
int __intel_debug = 0;
#endif
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_NV_vertex_program
#include "extension_helper.h"
/**
* Extension strings exported by the intel driver.
*
* \note
* It appears that ARB_texture_env_crossbar has "disappeared" compared to the
* old i830-specific driver.
*/
/* Each entry pairs an extension string with its entry-point table (or
 * NULL when the extension adds no new functions). */
const struct dri_extension card_extensions[] = {
   {"GL_ARB_multisample", GL_ARB_multisample_functions},
   {"GL_ARB_multitexture", NULL},
   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
   {"GL_ARB_texture_border_clamp", NULL},
   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
   {"GL_ARB_texture_cube_map", NULL},
   {"GL_ARB_texture_env_add", NULL},
   {"GL_ARB_texture_env_combine", NULL},
   {"GL_ARB_texture_env_dot3", NULL},
   {"GL_ARB_texture_mirrored_repeat", NULL},
   {"GL_ARB_texture_rectangle", NULL},
   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
   {"GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions},
   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
   {"GL_EXT_blend_subtract", NULL},
   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
   {"GL_EXT_packed_depth_stencil", NULL},
   {"GL_EXT_pixel_buffer_object", NULL},
   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
   {"GL_EXT_stencil_wrap", NULL},
   {"GL_EXT_texture_edge_clamp", NULL},
   {"GL_EXT_texture_env_combine", NULL},
   {"GL_EXT_texture_env_dot3", NULL},
   {"GL_EXT_texture_filter_anisotropic", NULL},
   {"GL_EXT_texture_lod_bias", NULL},
   {"GL_3DFX_texture_compression_FXT1", NULL},
   {"GL_APPLE_client_storage", NULL},
   {"GL_MESA_pack_invert", NULL},
   {"GL_MESA_ycbcr_texture", NULL},
   {"GL_NV_blend_square", NULL},
   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
   {"GL_NV_vertex_program1_1", NULL},
   {"GL_SGIS_generate_mipmap", NULL },
   {NULL, NULL}   /* end-of-list sentinel */
};
int
intel_create_context(struct egl_drm_context *eglCon, const __GLcontextModes *visual, void *sharedContextPrivate)
{
struct intel_context *iCon = CALLOC_STRUCT(intel_context);
struct intel_screen *iScrn = (struct intel_screen *)eglCon->device->priv;
struct pipe_context *pipe;
struct st_context *st_share = NULL;
eglCon->priv = iCon;
iCon->intel_screen = iScrn;
iCon->egl_context = eglCon;
iCon->egl_device = eglCon->device;
iCon->batch = intel_batchbuffer_alloc(iScrn);
iCon->last_swap_fence = NULL;
iCon->first_swap_fence = NULL;
pipe = intel_create_i915simple(iCon, iScrn->winsys);
// pipe = intel_create_softpipe(iCon, iScrn->winsys);
pipe->priv = iCon;
iCon->st = st_create_context(pipe, visual, st_share);
return TRUE;
}
/*
 * Bind the context's st_context to the draw/read framebuffers (or
 * unbind everything when context is NULL) and resize the framebuffers
 * to the drawables' current dimensions.
 */
void
intel_make_current(struct egl_drm_context *context, struct egl_drm_drawable *draw, struct egl_drm_drawable *read)
{
   if (context) {
      struct intel_context *intel = (struct intel_context *)context->priv;
      struct intel_framebuffer *draw_fb = (struct intel_framebuffer *)draw->priv;
      struct intel_framebuffer *read_fb = (struct intel_framebuffer *)read->priv;

      assert(draw_fb->stfb);
      assert(read_fb->stfb);

      st_make_current(intel->st, draw_fb->stfb, read_fb->stfb);

      intel->egl_drawable = draw;

      /* Keep the state tracker's notion of the buffer sizes current. */
      st_resize_framebuffer(draw_fb->stfb, draw->w, draw->h);

      if (draw != read)
         st_resize_framebuffer(read_fb->stfb, read->w, read->h);

      //intelUpdateWindowSize(driDrawPriv);
   } else {
      st_make_current(NULL, NULL, NULL);
   }
}
/*
 * (Re)bind a KMS front buffer to the drawable's framebuffer.  Passing
 * front == NULL just unbinds the current one.
 *
 * NOTE(review): driBOUnReference is called even on the first bind, when
 * front_buffer is still NULL — presumably it tolerates NULL; confirm.
 */
void
intel_bind_frontbuffer(struct egl_drm_drawable *draw, struct egl_drm_frontbuffer *front)
{
   struct intel_screen *intelScreen = (struct intel_screen *)draw->device->priv;
   struct intel_framebuffer *draw_fb = (struct intel_framebuffer *)draw->priv;

   /* Drop the reference to the previously bound front buffer. */
   driBOUnReference(draw_fb->front_buffer);
   draw_fb->front_buffer = NULL;
   draw_fb->front = NULL;

   /* to unbind just call this function with front == NULL */
   if (!front)
      return;

   draw_fb->front = front;

   /* Wrap the shared scanout handle in a DRI buffer object. */
   driGenBuffers(intelScreen->staticPool, "front", 1, &draw_fb->front_buffer, 0, 0, 0);
   driBOSetReferenced(draw_fb->front_buffer, front->handle);

   st_resize_framebuffer(draw_fb->stfb, draw->w, draw->h);
}
#if 0
GLboolean
intelCreateContext(const __GLcontextModes * visual,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate)
{
struct intel_context *intel = CALLOC_STRUCT(intel_context);
__DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
struct intel_screen *intelScreen = intel_screen(sPriv);
drmI830Sarea *saPriv = intelScreen->sarea;
int fthrottle_mode;
GLboolean havePools;
struct pipe_context *pipe;
struct st_context *st_share = NULL;
if (sharedContextPrivate) {
st_share = ((struct intel_context *) sharedContextPrivate)->st;
}
driContextPriv->driverPrivate = intel;
intel->intelScreen = intelScreen;
intel->driScreen = sPriv;
intel->sarea = saPriv;
driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
intel->driScreen->myNum, "i915");
/*
* memory pools
*/
DRM_LIGHT_LOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
// ZZZ JB should be per screen and not be done per context
havePools = intelCreatePools(sPriv);
DRM_UNLOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
if (!havePools)
return GL_FALSE;
/* Dri stuff */
intel->hHWContext = driContextPriv->hHWContext;
intel->driFd = sPriv->fd;
intel->driHwLock = (drmLock *) & sPriv->pSAREA->lock;
fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
intel->iw.irq_seq = -1;
intel->irqsEmitted = 0;
intel->batch = intel_batchbuffer_alloc(intel);
intel->last_swap_fence = NULL;
intel->first_swap_fence = NULL;
#ifdef DEBUG
__intel_debug = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
#endif
/*
* Pipe-related setup
*/
if (getenv("INTEL_SP")) {
/* use softpipe driver instead of hw */
pipe = intel_create_softpipe( intel, intelScreen->winsys );
}
else {
switch (intel->intelScreen->deviceID) {
case PCI_CHIP_I945_G:
case PCI_CHIP_I945_GM:
case PCI_CHIP_I945_GME:
case PCI_CHIP_G33_G:
case PCI_CHIP_Q33_G:
case PCI_CHIP_Q35_G:
case PCI_CHIP_I915_G:
case PCI_CHIP_I915_GM:
pipe = intel_create_i915simple( intel, intelScreen->winsys );
break;
default:
fprintf(stderr, "Unknown PCIID %x in %s, using software driver\n",
intel->intelScreen->deviceID, __FUNCTION__);
pipe = intel_create_softpipe( intel, intelScreen->winsys );
break;
}
}
pipe->priv = intel;
intel->st = st_create_context(pipe, visual, st_share);
return GL_TRUE;
}
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
struct intel_context *intel = intel_context(driContextPriv);
assert(intel); /* should never be null */
if (intel) {
st_finish(intel->st);
intel_batchbuffer_free(intel->batch);
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
driFenceUnReference(&intel->last_swap_fence);
intel->last_swap_fence = NULL;
}
if (intel->first_swap_fence) {
driFenceFinish(intel->first_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
driFenceUnReference(&intel->first_swap_fence);
intel->first_swap_fence = NULL;
}
if (intel->intelScreen->dummyContext == intel)
intel->intelScreen->dummyContext = NULL;
st_destroy_context(intel->st);
free(intel);
}
}
GLboolean
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
{
struct intel_context *intel = intel_context(driContextPriv);
st_flush(intel->st, PIPE_FLUSH_RENDER_CACHE, NULL);
/* XXX make_current(NULL)? */
return GL_TRUE;
}
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
__DRIdrawablePrivate * driDrawPriv,
__DRIdrawablePrivate * driReadPriv)
{
if (driContextPriv) {
struct intel_context *intel = intel_context(driContextPriv);
struct intel_framebuffer *draw_fb = intel_framebuffer(driDrawPriv);
struct intel_framebuffer *read_fb = intel_framebuffer(driReadPriv);
assert(draw_fb->stfb);
assert(read_fb->stfb);
/* This is for situations in which we need a rendering context but
* there may not be any currently bound.
*/
intel->intelScreen->dummyContext = intel;
st_make_current(intel->st, draw_fb->stfb, read_fb->stfb);
if ((intel->driDrawable != driDrawPriv) ||
(intel->lastStamp != driDrawPriv->lastStamp)) {
intel->driDrawable = driDrawPriv;
intelUpdateWindowSize(driDrawPriv);
intel->lastStamp = driDrawPriv->lastStamp;
}
/* The size of the draw buffer will have been updated above.
* If the readbuffer is a different window, check/update its size now.
*/
if (driReadPriv != driDrawPriv) {
intelUpdateWindowSize(driReadPriv);
}
}
else {
st_make_current(NULL, NULL, NULL);
}
return GL_TRUE;
}
#endif
@@ -0,0 +1,162 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_CONTEXT_H
#define INTEL_CONTEXT_H
#include <stdint.h>
#include "drm.h"
#include "pipe/p_debug.h"
#include "intel_screen.h"
#include "i915_drm.h"
struct pipe_context;
struct intel_context;
struct _DriBufferObject;
struct st_context;
struct egl_drm_device;
struct egl_drm_context;
struct egl_drm_frontbuffer;
#define INTEL_MAX_FIXUP 64
/**
* Intel rendering context, contains a state tracker and intel-specific info.
*/
struct intel_context
{
struct st_context *st;
struct _DriFenceObject *last_swap_fence;
struct _DriFenceObject *first_swap_fence;
struct intel_batchbuffer *batch;
#if 0
boolean locked;
char *prevLockFile;
int prevLockLine;
#endif
/* pick this up from the screen instead
int drmFd;
*/
struct intel_screen *intel_screen;
uint lastStamp;
/* new egl stuff */
struct egl_drm_device *egl_device;
struct egl_drm_context *egl_context;
struct egl_drm_drawable *egl_drawable;
};
/**
* Intel framebuffer.
*/
struct intel_framebuffer
{
struct st_framebuffer *stfb;
/* other fields TBD */
int other;
struct _DriBufferObject *front_buffer;
struct egl_drm_frontbuffer *front;
};
/* These are functions now:
*/
void LOCK_HARDWARE( struct intel_context *intel );
void UNLOCK_HARDWARE( struct intel_context *intel );
extern char *__progname;
/* ================================================================
* Debugging:
*/
#ifdef DEBUG
extern int __intel_debug;
#define DEBUG_SWAP 0x1
#define DEBUG_LOCK 0x2
#define DEBUG_IOCTL 0x4
#define DEBUG_BATCH 0x8
#define DBG(flag, ...) do { \
if (__intel_debug & (DEBUG_##flag)) \
printf(__VA_ARGS__); \
} while(0)
#else
#define DBG(flag, ...)
#endif
#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_I855_GM 0x3582
#define PCI_CHIP_I865_G 0x2572
#define PCI_CHIP_I915_G 0x2582
#define PCI_CHIP_I915_GM 0x2592
#define PCI_CHIP_I945_G 0x2772
#define PCI_CHIP_I945_GM 0x27A2
#define PCI_CHIP_I945_GME 0x27AE
#define PCI_CHIP_G33_G 0x29C2
#define PCI_CHIP_Q35_G 0x29B2
#define PCI_CHIP_Q33_G 0x29D2
#if 0
/** Cast wrapper */
static INLINE struct intel_context *
intel_context(__DRIcontextPrivate *driContextPriv)
{
return (struct intel_context *) driContextPriv->driverPrivate;
}
/** Cast wrapper */
static INLINE struct intel_framebuffer *
intel_framebuffer(__DRIdrawablePrivate * driDrawPriv)
{
return (struct intel_framebuffer *) driDrawPriv->driverPrivate;
}
#endif
#endif
@@ -0,0 +1,617 @@
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "eglconfig.h"
#include "eglcontext.h"
#include "egldisplay.h"
#include "egldriver.h"
#include "eglglobals.h"
#include "eglmode.h"
#include "eglscreen.h"
#include "eglsurface.h"
#include "glapi.h"
#include "intel_egl.h"
#include "xf86drm.h"
#include "xf86drmMode.h"
#include "intel_context.h"
#include "ws_dri_bufmgr.h"
#include "intel_winsys.h"
#include "state_tracker/st_public.h"
struct egl_drm_device* egl_drm_create_device(int drmFD);
/**
 * Allocate and initialize an egl_drm_device for an already-opened DRM fd.
 *
 * Returns NULL on allocation or driver-initialization failure.
 * Fix: the previous code freed the device when intel_init_driver()
 * failed but still returned the dangling pointer; the caller
 * (drm_initialize) checks for NULL, so return NULL instead.
 */
struct egl_drm_device*
egl_drm_create_device(int drmFD)
{
   struct egl_drm_device *device = malloc(sizeof(*device));

   if (!device)
      return NULL;

   memset(device, 0, sizeof(*device));
   device->drmFD = drmFD;

   if (!intel_init_driver(device)) {
      printf("EGL: failed to initalize device\n");
      free(device);
      return NULL;
   }

   return device;
}
__GLcontextModes* _gl_context_modes_create( unsigned count, size_t minimum_size );
/** EGL driver subclass holding the DRM device and cached KMS resources. */
struct drm_driver
{
   _EGLDriver base;  /* base class/object */

   drmModeResPtr res;              /* cached output/CRTC/mode resources */
   struct egl_drm_device *device;  /* opened DRM device, shared with the intel_* code */
};

/** EGL surface subclass wrapping an intel drawable. */
struct drm_surface
{
   _EGLSurface base;  /* base class/object */

   struct egl_drm_drawable *drawable;
};

/** EGL context subclass wrapping an intel context. */
struct drm_context
{
   _EGLContext base;  /* base class/object */

   struct egl_drm_context *context;
};

/** EGL screen subclass: one KMS output plus its scanout state. */
struct drm_screen
{
   _EGLScreen base;

   /* backing buffer and crtc */
   drmBO buffer;          /* scanout buffer object */
   drmModeFBPtr fb;       /* KMS framebuffer wrapping 'buffer' */
   uint32_t fbID;         /* KMS framebuffer id */
   drmModeCrtcPtr crtc;

   /* currently only support one output */
   drmModeOutputPtr output;
   uint32_t outputID;

   struct drm_mode_modeinfo *mode;  /* mode programmed by show_screen_surface */

   /* geometry of the screen */
   struct egl_drm_frontbuffer front;
};
/* Refresh the cached KMS resources from the kernel, releasing any
 * previously cached copy first. */
static void
drm_update_res(struct drm_driver *drm_drv)
{
   int fd = drm_drv->device->drmFD;

   drmModeFreeResources(drm_drv->res);
   drm_drv->res = drmModeGetResources(fd);
}
/* Register every display mode reported by the KMS output with the EGL
 * screen. */
static void
drm_add_modes_from_output(_EGLScreen *screen, drmModeOutputPtr output)
{
   int idx;

   for (idx = 0; idx < output->count_modes; idx++) {
      struct drm_mode_modeinfo *info = &output->modes[idx];

      _eglAddNewMode(screen, info->hdisplay, info->vdisplay,
                     info->vrefresh, info->name);
   }
}
/*
 * Open the i915 DRM device, create the intel driver state, register one
 * EGL screen per connected KMS output, and advertise a single
 * hard-coded ARGB8888 + depth24/stencil8 pbuffer config.
 */
static EGLBoolean
drm_initialize(_EGLDriver *drv, EGLDisplay dpy, EGLint *major, EGLint *minor)
{
   _EGLDisplay *disp = _eglLookupDisplay(dpy);
   struct drm_driver *drm_drv = (struct drm_driver *)drv;
   struct drm_screen *screen = NULL;
   drmModeOutputPtr output = NULL;
   drmModeResPtr res = NULL;
   EGLint i;
   int fd;

   fd = drmOpen("i915", NULL);
   if (fd < 0) {
      return EGL_FALSE;
   }

   drm_drv->device = egl_drm_create_device(fd);
   if (!drm_drv->device) {
      drmClose(fd);
      return EGL_FALSE;
   }

   drm_update_res(drm_drv);
   res = drm_drv->res;

   /* One EGL screen per connected output; disconnected outputs are
    * freed and skipped. */
   for(i = 0; i < res->count_outputs; i++) {
      output = drmModeGetOutput(fd, res->outputs[i]);

      if (!output)
         continue;

      if (output->connection == DRM_MODE_DISCONNECTED) {
         drmModeFreeOutput(output);
         continue;
      }

      /* NOTE(review): malloc result is not checked here. */
      screen = malloc(sizeof(struct drm_screen));
      memset(screen, 0, sizeof(*screen));

      screen->outputID = res->outputs[i];
      screen->output = output;

      _eglInitScreen(&screen->base);
      _eglAddScreen(disp, &screen->base);

      drm_add_modes_from_output(&screen->base, output);
   }

   /* for now we only have one config */
   _EGLConfig config;
   _eglInitConfig(&config, i + 1);
   _eglSetConfigAttrib(&config, EGL_RED_SIZE, 8);
   _eglSetConfigAttrib(&config, EGL_GREEN_SIZE, 8);
   _eglSetConfigAttrib(&config, EGL_BLUE_SIZE, 8);
   _eglSetConfigAttrib(&config, EGL_ALPHA_SIZE, 8);
   _eglSetConfigAttrib(&config, EGL_BUFFER_SIZE, 32);
   _eglSetConfigAttrib(&config, EGL_DEPTH_SIZE, 24);
   _eglSetConfigAttrib(&config, EGL_STENCIL_SIZE, 8);
   _eglSetConfigAttrib(&config, EGL_SURFACE_TYPE, EGL_PBUFFER_BIT);
   _eglAddConfig(disp, &config);

   drv->Initialized = EGL_TRUE;

   *major = 1;
   *minor = 0;

   return EGL_TRUE;
}
/* Tear down the driver object.  Device/screen cleanup is still TODO. */
static EGLBoolean
drm_terminate(_EGLDriver *drv, EGLDisplay dpy)
{
   (void) dpy;

   /* TODO: clean up */
   free(drv);

   return EGL_TRUE;
}
/* Resolve an EGLContext handle into our driver-private wrapper. */
static struct drm_context *
lookup_drm_context(EGLContext context)
{
   return (struct drm_context *) _eglLookupContext(context);
}
/* Resolve an EGLSurface handle into our driver-private wrapper. */
static struct drm_surface *
lookup_drm_surface(EGLSurface surface)
{
   return (struct drm_surface *) _eglLookupSurface(surface);
}
/* Resolve an EGLScreenMESA handle into our driver-private wrapper. */
static struct drm_screen *
lookup_drm_screen(EGLDisplay dpy, EGLScreenMESA screen)
{
   return (struct drm_screen *) _eglLookupScreen(dpy, screen);
}
/* Build a throwaway __GLcontextModes describing our single
 * double-buffered ARGB8888 + Z24S8 config; the caller frees it. */
static __GLcontextModes*
visual_from_config(_EGLConfig *conf)
{
   __GLcontextModes *modes;

   (void)conf;

   modes = _gl_context_modes_create(1, sizeof(*modes));

   modes->rgbBits = 32;
   modes->redBits = 8;
   modes->greenBits = 8;
   modes->blueBits = 8;
   modes->alphaBits = 8;

   modes->doubleBufferMode = 1;

   modes->depthBits = 24;
   modes->stencilBits = 8;

   return modes;
}
/*
 * Create an EGL context wrapping an intel rendering context.
 *
 * Fix: the egl_drm_context allocation was memset before being
 * null-checked (NULL dereference on OOM); allocate with calloc and
 * check first.  share_list is currently ignored (share stays NULL).
 */
static EGLContext
drm_create_context(_EGLDriver *drv, EGLDisplay dpy, EGLConfig config, EGLContext share_list, const EGLint *attrib_list)
{
   struct drm_driver *drm_drv = (struct drm_driver *)drv;
   struct drm_context *c;
   struct drm_egl_context *share = NULL;
   _EGLConfig *conf;
   int i;
   int ret;
   __GLcontextModes *visual;
   struct egl_drm_context *context;

   conf = _eglLookupConfig(drv, dpy, config);
   if (!conf) {
      _eglError(EGL_BAD_CONFIG, "eglCreateContext");
      return EGL_NO_CONTEXT;
   }

   for (i = 0; attrib_list && attrib_list[i] != EGL_NONE; i++) {
      switch (attrib_list[i]) {
         /* no attribs defined for now */
      default:
         _eglError(EGL_BAD_ATTRIBUTE, "eglCreateContext");
         return EGL_NO_CONTEXT;
      }
   }

   c = (struct drm_context *) calloc(1, sizeof(struct drm_context));
   if (!c)
      return EGL_NO_CONTEXT;

   _eglInitContext(drv, dpy, &c->base, config, attrib_list);

   /* calloc both allocates and zeroes, and the check now precedes use. */
   context = calloc(1, sizeof(*context));
   if (!context)
      goto err_c;

   context->device = drm_drv->device;

   visual = visual_from_config(conf);
   ret = intel_create_context(context, visual, share);
   free(visual);

   if (!ret)
      goto err_gl;

   c->context = context;

   /* generate handle and insert into hash table */
   _eglSaveContext(&c->base);
   assert(c->base.Handle);

   return c->base.Handle;

err_gl:
   free(context);
err_c:
   free(c);
   return EGL_NO_CONTEXT;
}
/* Unlink the context from EGL; defer the actual free while it is still
 * bound somewhere. */
static EGLBoolean
drm_destroy_context(_EGLDriver *drv, EGLDisplay dpy, EGLContext context)
{
   struct drm_context *dc = lookup_drm_context(context);

   _eglRemoveContext(&dc->base);

   if (!dc->base.IsBound)
      free(dc);
   else
      dc->base.DeletePending = EGL_TRUE;

   return EGL_TRUE;
}
/* Window surfaces are not supported by this KMS-only driver. */
static EGLSurface
drm_create_window_surface(_EGLDriver *drv, EGLDisplay dpy, EGLConfig config, NativeWindowType window, const EGLint *attrib_list)
{
   (void) drv;
   (void) dpy;
   (void) config;
   (void) window;
   (void) attrib_list;

   return EGL_NO_SURFACE;
}
/* Pixmap surfaces are not supported by this KMS-only driver. */
static EGLSurface
drm_create_pixmap_surface(_EGLDriver *drv, EGLDisplay dpy, EGLConfig config, NativePixmapType pixmap, const EGLint *attrib_list)
{
   (void) drv;
   (void) dpy;
   (void) config;
   (void) pixmap;
   (void) attrib_list;

   return EGL_NO_SURFACE;
}
/*
 * Create a pbuffer surface backed by an intel drawable.  EGL_WIDTH and
 * EGL_HEIGHT are the only accepted attributes.
 *
 * Fixes: the bad-config path returned EGL_NO_CONTEXT from a
 * surface-returning function (now EGL_NO_SURFACE), and the drawable was
 * memset before its allocation was null-checked (now calloc + check).
 */
static EGLSurface
drm_create_pbuffer_surface(_EGLDriver *drv, EGLDisplay dpy, EGLConfig config,
                           const EGLint *attrib_list)
{
   struct drm_driver *drm_drv = (struct drm_driver *)drv;
   int i;
   int ret;
   int width = -1;
   int height = -1;
   struct drm_surface *surf = NULL;
   struct egl_drm_drawable *drawable = NULL;
   __GLcontextModes *visual;
   _EGLConfig *conf;

   conf = _eglLookupConfig(drv, dpy, config);
   if (!conf) {
      _eglError(EGL_BAD_CONFIG, "eglCreatePbufferSurface");
      return EGL_NO_SURFACE;
   }

   for (i = 0; attrib_list && attrib_list[i] != EGL_NONE; i++) {
      switch (attrib_list[i]) {
      case EGL_WIDTH:
         width = attrib_list[++i];
         break;
      case EGL_HEIGHT:
         height = attrib_list[++i];
         break;
      default:
         _eglError(EGL_BAD_ATTRIBUTE, "eglCreatePbufferSurface");
         return EGL_NO_SURFACE;
      }
   }

   if (width < 1 || height < 1) {
      _eglError(EGL_BAD_ATTRIBUTE, "eglCreatePbufferSurface");
      return EGL_NO_SURFACE;
   }

   surf = (struct drm_surface *) calloc(1, sizeof(struct drm_surface));
   if (!surf)
      goto err;

   if (!_eglInitSurface(drv, dpy, &surf->base, EGL_PBUFFER_BIT, config, attrib_list))
      goto err_surf;

   /* calloc zeroes the struct and lets us check before touching it. */
   drawable = calloc(1, sizeof(*drawable));
   if (!drawable)
      goto err_surf;

   drawable->w = width;
   drawable->h = height;

   visual = visual_from_config(conf);

   drawable->device = drm_drv->device;
   ret = intel_create_drawable(drawable, visual);
   free(visual);

   if (!ret)
      goto err_draw;

   surf->drawable = drawable;

   _eglSaveSurface(&surf->base);
   return surf->base.Handle;

err_draw:
   free(drawable);
err_surf:
   free(surf);
err:
   return EGL_NO_SURFACE;
}
/* A screen surface is just a pbuffer surface in this driver. */
static EGLSurface
drm_create_screen_surface_mesa(_EGLDriver *drv, EGLDisplay dpy, EGLConfig cfg,
                               const EGLint *attrib_list)
{
   return drm_create_pbuffer_surface(drv, dpy, cfg, attrib_list);
}
/*
 * Find the output mode matching the EGL mode's width/height/refresh.
 * Returns NULL when no mode matches.
 *
 * Fix: 'm' was returned uninitialized (undefined behavior) when the
 * output reported zero modes; it now starts as NULL.
 */
static struct drm_mode_modeinfo *
drm_find_mode(drmModeOutputPtr output, _EGLMode *mode)
{
   int i;
   struct drm_mode_modeinfo *m = NULL;

   for (i = 0; i < output->count_modes; i++) {
      m = &output->modes[i];
      if (m->hdisplay == mode->Width && m->vdisplay == mode->Height && m->vrefresh == mode->RefreshRate)
         break;
      m = NULL;   /* keep NULL unless the loop breaks on a match */
   }

   return m;
}
/* Fill a w x h rectangle at (x, y) in a mapped 32-bit buffer with the
 * pixel value v.  pitch is in bytes; note that the first coordinate
 * selects the row (i.e. multiplies the stride), matching the callers. */
static void
draw(size_t x, size_t y, size_t w, size_t h, size_t pitch, size_t v, unsigned int *ptr)
{
   size_t stride = pitch / 4;   /* dwords per row */
   size_t row, col;

   for (row = x; row < x + w; row++) {
      for (col = y; col < y + h; col++)
         ptr[row * stride + col] = v;
   }
}
/*
 * Debug helper: fill the scanout buffer with white and paint a few
 * squares so a successful modeset is visually obvious.
 *
 * NOTE(review): the return values of drmBOReference/drmBOMap are not
 * checked; ptr would be garbage if the mapping failed — confirm this is
 * acceptable for a debug-only path.
 */
static void
prettyColors(int fd, unsigned int handle, size_t pitch)
{
   drmBO bo;
   unsigned int *ptr;
   int i;

   drmBOReference(fd, handle, &bo);
   drmBOMap(fd, &bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, (void**)&ptr);

   /* White background. */
   for (i = 0; i < (bo.size / 4); i++)
      ptr[i] = 0xFFFFFFFF;

   /* Black squares along the diagonal. */
   for (i = 0; i < 4; i++)
      draw(i * 40, i * 40, 40, 40, pitch, 0, ptr);

   /* Two magenta accents. */
   draw(200, 100, 40, 40, pitch, 0xff00ff, ptr);
   draw(100, 200, 40, 40, pitch, 0xff00ff, ptr);

   drmBOUnmap(fd, &bo);
}
/**
 * Show a surface on a screen: allocate a framebuffer BO, fill it with a
 * debug pattern, register it as a DRM framebuffer and program the CRTC
 * with the requested mode.
 */
static EGLBoolean
drm_show_screen_surface_mesa(_EGLDriver *drv, EGLDisplay dpy,
                             EGLScreenMESA screen,
                             EGLSurface surface, EGLModeMESA m)
{
   struct drm_driver *drm_drv = (struct drm_driver *)drv;
   struct drm_surface *surf = lookup_drm_surface(surface);
   struct drm_screen *scrn = lookup_drm_screen(dpy, screen);
   _EGLMode *mode = _eglLookupMode(dpy, m);
   /* TODO: pitch is hardcoded to 2048 pixels * 4 bytes; derive from mode. */
   size_t pitch = 2048 * 4;
   size_t size = mode->Height * pitch;
   int ret;

   /* TODO if allready shown take down */
   printf("setting mode to %i x %i\n", mode->Width, mode->Height);

   ret = drmBOCreate(drm_drv->device->drmFD, size, 0, 0,
                     DRM_BO_FLAG_READ |
                     DRM_BO_FLAG_WRITE |
                     DRM_BO_FLAG_MEM_TT |
                     DRM_BO_FLAG_MEM_VRAM |
                     DRM_BO_FLAG_NO_EVICT,
                     DRM_BO_HINT_DONT_FENCE, &scrn->buffer);
   if (ret) {
      printf("failed to create framebuffer (ret %d)\n", ret);
      return EGL_FALSE;
   }

   /* Only touch the BO once we know it was actually created --
    * prettyColors() used to run before the drmBOCreate() check. */
   prettyColors(drm_drv->device->drmFD, scrn->buffer.handle, pitch);

   ret = drmModeAddFB(drm_drv->device->drmFD, mode->Width, mode->Height,
                      32, 32, pitch,
                      scrn->buffer.handle,
                      &scrn->fbID);
   if (ret)
      goto err_bo;

   scrn->fb = drmModeGetFB(drm_drv->device->drmFD, scrn->fbID);
   if (!scrn->fb)
      goto err_fb;

   scrn->mode = drm_find_mode(scrn->output, mode);
   if (!scrn->mode) {
      printf("oh noes, no matching mode found\n");
      goto err_fb;
   }

   /* NOTE(review): CRTC index 1 is hardcoded -- confirm against the
    * resources reported by the device. */
   ret = drmModeSetCrtc(
      drm_drv->device->drmFD,
      drm_drv->res->crtcs[1],
      scrn->fbID,
      0, 0,
      &scrn->outputID, 1,
      scrn->mode);

   scrn->front.handle = scrn->buffer.handle;
   scrn->front.pitch = pitch;
   scrn->front.width = mode->Width;
   scrn->front.height = mode->Height;

   intel_bind_frontbuffer(surf->drawable, &scrn->front);

   return EGL_TRUE;

err_fb:
   /* Undo the drmModeAddFB (was a TODO / leaked before). */
   drmModeRmFB(drm_drv->device->drmFD, scrn->fbID);
err_bo:
   drmBOUnreference(drm_drv->device->drmFD, &scrn->buffer);
   return EGL_FALSE;
}
/**
 * Destroy a surface.  While the surface is still bound the free is
 * deferred via DeletePending; otherwise it is released immediately.
 */
static EGLBoolean
drm_destroy_surface(_EGLDriver *drv, EGLDisplay dpy, EGLSurface surface)
{
   struct drm_surface *fs = lookup_drm_surface(surface);

   _eglRemoveSurface(&fs->base);

   if (!fs->base.IsBound)
      free(fs);
   else
      fs->base.DeletePending = EGL_TRUE;

   return EGL_TRUE;
}
/**
 * Bind the given context and draw/read surfaces.
 *
 * \return EGL_TRUE on success, EGL_FALSE if the core rejects the binding.
 */
static EGLBoolean
drm_make_current(_EGLDriver *drv, EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext context)
{
   struct drm_surface *readSurf = lookup_drm_surface(read);
   struct drm_surface *drawSurf = lookup_drm_surface(draw);
   struct drm_context *ctx = lookup_drm_context(context);

   if (!_eglMakeCurrent(drv, dpy, draw, read, context))
      return EGL_FALSE;

   /* Guard the unbind case (EGL_NO_CONTEXT / EGL_NO_SURFACE): the lookups
    * then yield NULL, which was previously dereferenced unconditionally.
    * NOTE(review): intel_make_current() is not told about unbinds at all
    * now -- confirm whether it needs an explicit (NULL, NULL, NULL) call. */
   if (ctx && drawSurf && readSurf)
      intel_make_current(ctx->context, drawSurf->drawable, readSurf->drawable);

   return EGL_TRUE;
}
/**
 * Swap front/back buffers of the given surface.
 */
static EGLBoolean
drm_swap_buffers(_EGLDriver *drv, EGLDisplay dpy, EGLSurface draw)
{
   struct drm_surface *surf = lookup_drm_surface(draw);

   if (surf == NULL)
      return EGL_FALSE;

   /* error checking */
   if (_eglSwapBuffers(drv, dpy, draw) != EGL_TRUE)
      return EGL_FALSE;

   intel_swap_buffers(surf->drawable);

   return EGL_TRUE;
}
/**
 * The bootstrap function. Return a new drm_driver object and
 * plug in API functions.
 */
_EGLDriver *
_eglMain(_EGLDisplay *dpy)
{
   struct drm_driver *drm_drv = calloc(1, sizeof(*drm_drv));
   _EGLDriver *base;

   if (drm_drv == NULL)
      return NULL;

   base = &drm_drv->base;

   /* Start from the default dispatch table... */
   _eglInitDriverFallbacks(base);

   /* ...then override with the DRM-specific entry points. */
   base->API.Initialize = drm_initialize;
   base->API.Terminate = drm_terminate;
   base->API.CreateContext = drm_create_context;
   base->API.DestroyContext = drm_destroy_context;
   base->API.MakeCurrent = drm_make_current;
   base->API.CreateWindowSurface = drm_create_window_surface;
   base->API.CreatePixmapSurface = drm_create_pixmap_surface;
   base->API.CreatePbufferSurface = drm_create_pbuffer_surface;
   base->API.DestroySurface = drm_destroy_surface;
   base->API.CreateScreenSurfaceMESA = drm_create_screen_surface_mesa;
   base->API.ShowScreenSurfaceMESA = drm_show_screen_surface_mesa;
   base->API.SwapBuffers = drm_swap_buffers;

   /* enable supported extensions */
   base->Extensions.MESA_screen_surface = EGL_TRUE;
   base->Extensions.MESA_copy_context = EGL_TRUE;

   return base;
}
@@ -0,0 +1,42 @@
#ifndef _INTEL_EGL_H_
#define _INTEL_EGL_H_

/* The header uses size_t and uint32_t below, so pull in their
 * definitions to keep it self-contained. */
#include <stddef.h>
#include <stdint.h>

/** Per-device state shared between the EGL driver and the intel winsys. */
struct egl_drm_device
{
   void *priv;               /* winsys-private data (struct intel_screen) */
   int drmFD;                /* file descriptor of the opened DRM device */
};

/** Per-context state. */
struct egl_drm_context
{
   void *priv;               /* winsys-private data */
   struct egl_drm_device *device;
};

/** Per-drawable state. */
struct egl_drm_drawable
{
   void *priv;               /* winsys-private data (struct intel_framebuffer) */
   struct egl_drm_device *device;
   size_t h;                 /* height in pixels */
   size_t w;                 /* width in pixels */
};

/** Description of a scanout buffer a drawable can be bound to. */
struct egl_drm_frontbuffer
{
   uint32_t handle;          /* BO handle of the buffer */
   uint32_t pitch;           /* row stride in bytes */
   uint32_t width;
   uint32_t height;
};

#include "GL/internal/glcore.h"

/* Entry points implemented by the intel winsys (intel_egl API). */
int intel_init_driver(struct egl_drm_device *device);
int intel_create_context(struct egl_drm_context *context, const __GLcontextModes *visual, void *sharedContextPrivate);
int intel_create_drawable(struct egl_drm_drawable *drawable, const __GLcontextModes * visual);
void intel_make_current(struct egl_drm_context *context, struct egl_drm_drawable *draw, struct egl_drm_drawable *read);
void intel_swap_buffers(struct egl_drm_drawable *draw);
void intel_bind_frontbuffer(struct egl_drm_drawable *draw, struct egl_drm_frontbuffer *front);

#endif
@@ -0,0 +1,102 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "main/glheader.h"
#include "glapi/glthread.h"
#include <GL/internal/glcore.h>
#include "state_tracker/st_public.h"
#include "intel_context.h"
#if 0
/* Disabled: legacy DRI1 hardware-lock code, unused in the EGL/DRM path.
 * Kept for reference during the port. */
_glthread_DECLARE_STATIC_MUTEX( lockMutex );

/* Called when the hardware lock was contended: re-take the lock, then
 * revalidate the drawable and pick up any screen-size/rotation change
 * recorded in the SAREA. */
static void
intelContendedLock(struct intel_context *intel, uint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   struct intel_screen *intelScreen = intel_screen(sPriv);
   drmI830Sarea *sarea = intel->sarea;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   DBG(LOCK, "%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   if (sarea->width != intelScreen->front.width ||
       sarea->height != intelScreen->front.height) {
      intelUpdateScreenRotation(sPriv, sarea);
   }
}

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   char __ret = 0;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);

   /* Fast path: try to take the DRM lock without a syscall. */
   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);
   if (__ret)
      intelContendedLock( intel, 0 );

   DBG(LOCK, "%s - locked\n", __progname);

   intel->locked = 1;
}

/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   assert(intel->locked);
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   DBG(LOCK, "%s - unlocked\n", __progname);
}
#endif
@@ -0,0 +1,53 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _INTEL_REG_H_
#define _INTEL_REG_H_

/* Header-dword bits of the legacy i8xx/i9xx 2D blitter commands
 * (BR00 = command, BR13 = control: raster op, pitch, color depth). */
#define BR00_BITBLT_CLIENT 0x40000000
#define BR00_OP_COLOR_BLT 0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN 0x80000000

/* XY_COLOR_BLT: solid-color rectangle fill. */
#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
#define XY_COLOR_BLT_WRITE_RGB (1<<20)

/* XY_SRC_COPY_BLT: rectangle copy between two buffers. */
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)

/* MI (memory interface) commands: wait-for-event and the batch
 * buffer terminator. */
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_BATCH_BUFFER_END (0xA<<23)

#endif
@@ -0,0 +1,680 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "utils.h"
#include "vblank.h"
#include "xmlpool.h"
#include "intel_context.h"
#include "intel_screen.h"
#include "intel_batchbuffer.h"
//#include "intel_batchpool.h"
#include "intel_swapbuffers.h"
#include "intel_winsys.h"
#include "ws_dri_bufpool.h"
#include "pipe/p_context.h"
#include "state_tracker/st_public.h"
#include "state_tracker/st_cb_fbo.h"
#include "intel_egl.h"
static boolean
intel_create_pools(struct intel_screen *intel_screen)
{
if (intel_screen->havePools)
return GL_TRUE;
intel_screen->mgr = driFenceMgrTTMInit(intel_screen->device->drmFD);
if (!intel_screen->mgr) {
fprintf(stderr, "Failed to create fence manager.\n");
return FALSE;
}
intel_screen->fMan = driInitFreeSlabManager(10, 10);
if (!intel_screen->fMan) {
fprintf(stderr, "Failed to create free slab manager.\n");
return FALSE;
}
intel_screen->staticPool = driDRMPoolInit(intel_screen->device->drmFD);
intel_screen->batchPool = driSlabPoolInit(intel_screen->device->drmFD,
DRM_BO_FLAG_EXE |
DRM_BO_FLAG_MEM_TT,
DRM_BO_FLAG_EXE |
DRM_BO_FLAG_MEM_TT,
4096, //intelScreen->maxBatchSize,
1, 40, 16*16384, 0,
intel_screen->fMan);
intel_screen->havePools = GL_TRUE;
return GL_TRUE;
}
extern const struct dri_extension card_extensions[];
/**
 * Initialize per-device winsys state: allocate the intel_screen, create
 * the buffer pools, the swap batchbuffer and the pipe winsys.
 *
 * \return TRUE on success, FALSE on failure (device->priv left untouched).
 */
int
intel_init_driver(struct egl_drm_device *device)
{
   struct intel_screen *intel_screen;

   /* Allocate the private area */
   intel_screen = CALLOC_STRUCT(intel_screen);
   if (!intel_screen)
      return FALSE;

   intel_screen->device = device;

   if (!intel_create_pools(intel_screen)) {
      /* Don't leak the screen or leave a dangling priv pointer
       * (device->priv was previously set before this could fail). */
      FREE(intel_screen);
      return FALSE;
   }

   device->priv = (void *)intel_screen;

   intel_screen->batch = intel_batchbuffer_alloc(intel_screen);
   intel_screen->winsys = intel_create_pipe_winsys(device->drmFD, intel_screen->fMan);

   /* hack */
   driInitExtensions(NULL, card_extensions, GL_FALSE);

   return TRUE;
}
/**
 * Create the winsys-side state for a new drawable: pick pipe formats
 * from the GL visual and wrap them in a state-tracker framebuffer.
 *
 * \return GL_TRUE on success, GL_FALSE on allocation failure.
 */
int
intel_create_drawable(struct egl_drm_drawable *drawable,
                      const __GLcontextModes * visual)
{
   enum pipe_format colorFormat, depthFormat, stencilFormat;
   struct intel_framebuffer *intelfb = CALLOC_STRUCT(intel_framebuffer);

   if (intelfb == NULL)
      return GL_FALSE;

   /* Color format follows the visual's red channel width. */
   colorFormat = (visual->redBits == 5) ? PIPE_FORMAT_R5G6B5_UNORM
                                        : PIPE_FORMAT_A8R8G8B8_UNORM;

   switch (visual->depthBits) {
   case 16:
      depthFormat = PIPE_FORMAT_Z16_UNORM;
      break;
   case 24:
      depthFormat = PIPE_FORMAT_S8Z24_UNORM;
      break;
   default:
      depthFormat = PIPE_FORMAT_NONE;
      break;
   }

   stencilFormat = (visual->stencilBits == 8) ? PIPE_FORMAT_S8Z24_UNORM
                                              : PIPE_FORMAT_NONE;

   intelfb->stfb = st_create_framebuffer(visual,
                                         colorFormat,
                                         depthFormat,
                                         stencilFormat,
                                         drawable->w,
                                         drawable->h,
                                         (void *) intelfb);
   if (intelfb->stfb == NULL) {
      free(intelfb);
      return GL_FALSE;
   }

   drawable->priv = (void *) intelfb;
   return GL_TRUE;
}
#if 0
/* Disabled: legacy DRI1 screen code, kept for reference while the driver
 * is ported to the EGL/DRM winsys. */

/* driconf option table exposed to the X driver / driconf tool. */
PUBLIC const char __driConfigOptions[] =
DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
DRI_CONF_FORCE_S3TC_ENABLE(false)
DRI_CONF_ALLOW_LARGE_TEXTURES(1)
DRI_CONF_SECTION_END DRI_CONF_END;
const uint __driNConfigOptions = 4;

#ifdef USE_NEW_INTERFACE
static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
#endif /*USE_NEW_INTERFACE */

extern const struct dri_extension card_extensions[];

/* Debug dump of the front-buffer layout passed in by the DDX. */
static void
intelPrintDRIInfo(struct intel_screen * intelScreen,
                  __DRIscreenPrivate * sPriv, I830DRIPtr gDRIPriv)
{
   fprintf(stderr, "*** Front size:   0x%x  offset: 0x%x  pitch: %d\n",
           intelScreen->front.size, intelScreen->front.offset,
           intelScreen->front.pitch);
   fprintf(stderr, "*** Memory : 0x%x\n", gDRIPriv->mem);
}
/* Debug dump of the shared SAREA buffer layout (disabled DRI1 code). */
static void
intelPrintSAREA(const drmI830Sarea * sarea)
{
   fprintf(stderr, "SAREA: sarea width %d height %d\n", sarea->width,
           sarea->height);
   fprintf(stderr, "SAREA: pitch: %d\n", sarea->pitch);
   fprintf(stderr,
           "SAREA: front offset: 0x%08x size: 0x%x handle: 0x%x\n",
           sarea->front_offset, sarea->front_size,
           (unsigned) sarea->front_handle);
   fprintf(stderr,
           "SAREA: back offset: 0x%08x size: 0x%x handle: 0x%x\n",
           sarea->back_offset, sarea->back_size,
           (unsigned) sarea->back_handle);
   fprintf(stderr, "SAREA: depth offset: 0x%08x size: 0x%x handle: 0x%x\n",
           sarea->depth_offset, sarea->depth_size,
           (unsigned) sarea->depth_handle);
   fprintf(stderr, "SAREA: tex offset: 0x%08x size: 0x%x handle: 0x%x\n",
           sarea->tex_offset, sarea->tex_size, (unsigned) sarea->tex_handle);
   fprintf(stderr, "SAREA: rotation: %d\n", sarea->rotation);
   fprintf(stderr,
           "SAREA: rotated offset: 0x%08x size: 0x%x\n",
           sarea->rotated_offset, sarea->rotated_size);
   fprintf(stderr, "SAREA: rotated pitch: %d\n", sarea->rotated_pitch);
}
/**
 * Use the information in the sarea to update the screen parameters
 * related to screen rotation. Needs to be called locked.
 * (Disabled DRI1 code.)
 */
void
intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
{
   struct intel_screen *intelScreen = intel_screen(sPriv);

   /* Drop the old front-buffer mapping and buffer before re-reading
    * the geometry from the SAREA. */
   if (intelScreen->front.map) {
      drmUnmap(intelScreen->front.map, intelScreen->front.size);
      intelScreen->front.map = NULL;
   }

   if (intelScreen->front.buffer)
      driDeleteBuffers(1, &intelScreen->front.buffer);

   intelScreen->front.width = sarea->width;
   intelScreen->front.height = sarea->height;
   intelScreen->front.offset = sarea->front_offset;
   intelScreen->front.pitch = sarea->pitch * intelScreen->front.cpp;
   intelScreen->front.size = sarea->front_size;
   intelScreen->front.handle = sarea->front_handle;

   assert( sarea->front_size >=
           intelScreen->front.pitch * intelScreen->front.height );

#if 0 /* JB not important */
   if (!sarea->front_handle)
      return;

   if (drmMap(sPriv->fd,
              sarea->front_handle,
              intelScreen->front.size,
              (drmAddress *) & intelScreen->front.map) != 0) {
      fprintf(stderr, "drmMap(frontbuffer) failed!\n");
      return;
   }
#endif

#if 0 /* JB */
   if (intelScreen->staticPool) {
      driGenBuffers(intelScreen->staticPool, "static region", 1,
                    &intelScreen->front.buffer, 64,
                    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE |
                    DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);

      driBOSetStatic(intelScreen->front.buffer,
                     intelScreen->front.offset,
                     intelScreen->front.pitch * intelScreen->front.height,
                     intelScreen->front.map, 0);
   }
#else
   /* Wrap the front buffer BO handle exported through the SAREA. */
   if (intelScreen->staticPool) {
      if (intelScreen->front.buffer)
         driBOUnReference(intelScreen->front.buffer);
      driGenBuffers(intelScreen->staticPool, "front", 1, &intelScreen->front.buffer, 0, 0, 0);
      driBOSetReferenced(intelScreen->front.buffer, sarea->front_bo_handle);
   }
#endif
}
/* DRI1 variant of the pool setup; superseded by intel_create_pools()
 * in the EGL path.  (Disabled DRI1 code.) */
boolean
intelCreatePools(__DRIscreenPrivate * sPriv)
{
   //unsigned batchPoolSize = 1024*1024;
   struct intel_screen *intelScreen = intel_screen(sPriv);

   if (intelScreen->havePools)
      return GL_TRUE;

#if 0 /* ZZZ JB fix this */
   intelScreen->staticPool = driDRMStaticPoolInit(sPriv->fd);
   if (!intelScreen->staticPool)
      return GL_FALSE;

   batchPoolSize /= BATCH_SZ;
   intelScreen->batchPool = driBatchPoolInit(sPriv->fd,
                                             DRM_BO_FLAG_EXE |
                                             DRM_BO_FLAG_MEM_TT |
                                             DRM_BO_FLAG_MEM_LOCAL,
                                             BATCH_SZ,
                                             batchPoolSize, 5);
   if (!intelScreen->batchPool) {
      fprintf(stderr, "Failed to initialize batch pool - possible incorrect agpgart installed\n");
      return GL_FALSE;
   }
#else
   intelScreen->staticPool = driDRMPoolInit(sPriv->fd);
   intelScreen->batchPool = driSlabPoolInit(sPriv->fd,
                                            DRM_BO_FLAG_EXE |
                                            DRM_BO_FLAG_MEM_TT,
                                            DRM_BO_FLAG_EXE |
                                            DRM_BO_FLAG_MEM_TT,
                                            4096, //intelScreen->maxBatchSize,
                                            1, 40, 16*16384, 0,
                                            intelScreen->fMan);
#endif
   intelScreen->havePools = GL_TRUE;

   //intelUpdateScreenRotation(sPriv, intelScreen->sarea);

   return GL_TRUE;
}
/* DRI1 screen initialization: validate the DDX record, read the SAREA,
 * enable GLX extensions and set up fence/pool managers.
 * (Disabled DRI1 code.) */
static boolean
intelInitDriver(__DRIscreenPrivate * sPriv)
{
   struct intel_screen *intelScreen;
   I830DRIPtr gDRIPriv = (I830DRIPtr) sPriv->pDevPriv;

   PFNGLXSCRENABLEEXTENSIONPROC glx_enable_extension =
      (PFNGLXSCRENABLEEXTENSIONPROC) (*dri_interface->
                                      getProcAddress("glxEnableExtension"));
   void *const psc = sPriv->psc->screenConfigs;

   if (sPriv->devPrivSize != sizeof(I830DRIRec)) {
      fprintf(stderr,
              "\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
      return GL_FALSE;
   }

   /* Allocate the private area */
   intelScreen = CALLOC_STRUCT(intel_screen);
   if (!intelScreen)
      return GL_FALSE;

   /* parse information in __driConfigOptions */
   driParseOptionInfo(&intelScreen->optionCache,
                      __driConfigOptions, __driNConfigOptions);

   sPriv->private = (void *) intelScreen;

   intelScreen->sarea = (drmI830Sarea *) (((GLubyte *) sPriv->pSAREA) +
                                          gDRIPriv->sarea_priv_offset);
   intelScreen->deviceID = gDRIPriv->deviceID;
   intelScreen->front.cpp = gDRIPriv->cpp;
   intelScreen->drmMinor = sPriv->drmMinor;

   assert(gDRIPriv->bitsPerPixel == 16 ||
          gDRIPriv->bitsPerPixel == 32);

   intelUpdateScreenRotation(sPriv, intelScreen->sarea);

   if (0)
      intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);

   if (glx_enable_extension != NULL) {
      (*glx_enable_extension) (psc, "GLX_SGI_swap_control");
      (*glx_enable_extension) (psc, "GLX_SGI_video_sync");
      (*glx_enable_extension) (psc, "GLX_MESA_swap_control");
      (*glx_enable_extension) (psc, "GLX_MESA_swap_frame_usage");
      (*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
   }

#if 1 // ZZZ JB
   intelScreen->mgr = driFenceMgrTTMInit(sPriv->fd);
   if (!intelScreen->mgr) {
      fprintf(stderr, "Failed to create fence manager.\n");
      return GL_FALSE;
   }

   intelScreen->fMan = driInitFreeSlabManager(10, 10);
   if (!intelScreen->fMan) {
      fprintf(stderr, "Failed to create free slab manager.\n");
      return GL_FALSE;
   }

   if (!intelCreatePools(sPriv))
      return GL_FALSE;
#endif

   intelScreen->winsys = intel_create_pipe_winsys(sPriv->fd, intelScreen->fMan);

   return GL_TRUE;
}
/* Tear down per-screen state created by intelInitDriver.
 * (Disabled DRI1 code.) */
static void
intelDestroyScreen(__DRIscreenPrivate * sPriv)
{
   struct intel_screen *intelScreen = intel_screen(sPriv);

   /*  intelUnmapScreenRegions(intelScreen); */

   if (intelScreen->havePools) {
      driPoolTakeDown(intelScreen->staticPool);
      driPoolTakeDown(intelScreen->batchPool);
   }
   FREE(intelScreen);
   sPriv->private = NULL;
}
/**
 * This is called when we need to set up GL rendering to a new X window.
 * (Disabled DRI1 code; the EGL path uses intel_create_drawable instead.)
 */
static boolean
intelCreateBuffer(__DRIscreenPrivate * driScrnPriv,
                  __DRIdrawablePrivate * driDrawPriv,
                  const __GLcontextModes * visual, boolean isPixmap)
{
   if (isPixmap) {
      return GL_FALSE;          /* not implemented */
   }
   else {
      enum pipe_format colorFormat, depthFormat, stencilFormat;
      struct intel_framebuffer *intelfb = CALLOC_STRUCT(intel_framebuffer);

      if (!intelfb)
         return GL_FALSE;

      /* Map the GL visual to pipe formats (same logic as
       * intel_create_drawable). */
      if (visual->redBits == 5)
         colorFormat = PIPE_FORMAT_R5G6B5_UNORM;
      else
         colorFormat = PIPE_FORMAT_A8R8G8B8_UNORM;

      if (visual->depthBits == 16)
         depthFormat = PIPE_FORMAT_Z16_UNORM;
      else if (visual->depthBits == 24)
         depthFormat = PIPE_FORMAT_S8Z24_UNORM;
      else
         depthFormat = PIPE_FORMAT_NONE;

      if (visual->stencilBits == 8)
         stencilFormat = PIPE_FORMAT_S8Z24_UNORM;
      else
         stencilFormat = PIPE_FORMAT_NONE;

      intelfb->stfb = st_create_framebuffer(visual,
                                            colorFormat,
                                            depthFormat,
                                            stencilFormat,
                                            driDrawPriv->w,
                                            driDrawPriv->h,
                                            (void*) intelfb);
      if (!intelfb->stfb) {
         free(intelfb);
         return GL_FALSE;
      }

      driDrawPriv->driverPrivate = (void *) intelfb;
      return GL_TRUE;
   }
}
/* Release the framebuffer wrapper created by intelCreateBuffer.
 * (Disabled DRI1 code.) */
static void
intelDestroyBuffer(__DRIdrawablePrivate * driDrawPriv)
{
   struct intel_framebuffer *intelfb = intel_framebuffer(driDrawPriv);
   assert(intelfb->stfb);
   st_unreference_framebuffer(&intelfb->stfb);
   free(intelfb);
}

/**
 * Get information about previous buffer swaps.
 * (Stub: only validates its arguments.  Disabled DRI1 code.)
 */
static int
intelGetSwapInfo(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
{
   if ((dPriv == NULL) || (dPriv->driverPrivate == NULL)
       || (sInfo == NULL)) {
      return -1;
   }

   return 0;
}
/* GLX_MESA_allocate_memory-style texture-from-offset hook.
 * Currently aborts; the old implementation is kept below under #if 0.
 * (Disabled DRI1 code.) */
static void
intelSetTexOffset(__DRIcontext *pDRICtx, int texname,
                  unsigned long long offset, int depth, uint pitch)
{
   abort();
#if 0
   struct intel_context *intel = (struct intel_context*)
      ((__DRIcontextPrivate*)pDRICtx->private)->driverPrivate;
   struct gl_texture_object *tObj = _mesa_lookup_texture(&intel->ctx, texname);
   struct st_texture_object *stObj = st_texture_object(tObj);

   if (!stObj)
      return;

   if (stObj->pt)
      st->pipe->texture_release(intel->st->pipe, &stObj->pt);

   stObj->imageOverride = GL_TRUE;
   stObj->depthOverride = depth;
   stObj->pitchOverride = pitch;

   if (offset)
      stObj->textureOffset = offset;
#endif
}

/* DRI1 driver entry-point table. (Disabled DRI1 code.) */
static const struct __DriverAPIRec intelAPI = {
   .InitDriver = intelInitDriver,
   .DestroyScreen = intelDestroyScreen,
   .CreateContext = intelCreateContext,
   .DestroyContext = intelDestroyContext,
   .CreateBuffer = intelCreateBuffer,
   .DestroyBuffer = intelDestroyBuffer,
   .SwapBuffers = intelSwapBuffers,
   .MakeCurrent = intelMakeCurrent,
   .UnbindContext = intelUnbindContext,
   .GetSwapInfo = intelGetSwapInfo,
   .GetMSC = driGetMSC32,
   .WaitForMSC = driWaitForMSC32,
   .WaitForSBC = NULL,
   .SwapBuffersMSC = NULL,
   .CopySubBuffer = intelCopySubBuffer,
   .setTexOffset = intelSetTexOffset,
};
/* Build the list of GLX visuals/fbconfigs the driver exposes, combining
 * depth/stencil and back-buffer variants.  (Disabled DRI1 code.) */
static __GLcontextModes *
intelFillInModes(unsigned pixel_bits, unsigned depth_bits,
                 unsigned stencil_bits, boolean have_back_buffer)
{
   __GLcontextModes *modes;
   __GLcontextModes *m;
   unsigned num_modes;
   unsigned depth_buffer_factor;
   unsigned back_buffer_factor;
   GLenum fb_format;
   GLenum fb_type;

   /* GLX_SWAP_COPY_OML is only supported because the Intel driver doesn't
    * support pageflipping at all.
    */
   static const GLenum back_buffer_modes[] = {
      GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
   };

   u_int8_t depth_bits_array[3];
   u_int8_t stencil_bits_array[3];

   depth_bits_array[0] = 0;
   depth_bits_array[1] = depth_bits;
   depth_bits_array[2] = depth_bits;

   /* Just like with the accumulation buffer, always provide some modes
    * with a stencil buffer.  It will be a sw fallback, but some apps won't
    * care about that.
    */
   stencil_bits_array[0] = 0;
   stencil_bits_array[1] = 0;
   if (depth_bits == 24)
      stencil_bits_array[1] = (stencil_bits == 0) ? 8 : stencil_bits;

   stencil_bits_array[2] = (stencil_bits == 0) ? 8 : stencil_bits;

   depth_buffer_factor = ((depth_bits != 0) || (stencil_bits != 0)) ? 3 : 1;
   back_buffer_factor = (have_back_buffer) ? 3 : 1;

   num_modes = depth_buffer_factor * back_buffer_factor * 4;

   if (pixel_bits == 16) {
      fb_format = GL_RGB;
      fb_type = GL_UNSIGNED_SHORT_5_6_5;
   }
   else {
      fb_format = GL_BGRA;
      fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
   }

   modes =
      (*dri_interface->createContextModes) (num_modes,
                                            sizeof(__GLcontextModes));
   m = modes;
   if (!driFillInModes(&m, fb_format, fb_type,
                       depth_bits_array, stencil_bits_array,
                       depth_buffer_factor, back_buffer_modes,
                       back_buffer_factor, GLX_TRUE_COLOR)) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }
   if (!driFillInModes(&m, fb_format, fb_type,
                       depth_bits_array, stencil_bits_array,
                       depth_buffer_factor, back_buffer_modes,
                       back_buffer_factor, GLX_DIRECT_COLOR)) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }

   /* Mark the visual as slow if there are "fake" stencil bits.
    */
   for (m = modes; m != NULL; m = m->next) {
      if ((m->stencilBits != 0) && (m->stencilBits != stencil_bits)) {
         m->visualRating = GLX_SLOW_CONFIG;
      }
   }

   return modes;
}
/**
 * This is the bootstrap function for the driver.  libGL supplies all of the
 * requisite information about the system, and the driver initializes itself.
 * This routine also fills in the linked list pointed to by \c driver_modes
 * with the \c __GLcontextModes that the driver can support for windows or
 * pbuffers.
 * (Disabled DRI1 code.)
 *
 * \return A pointer to a \c __DRIscreenPrivate on success, or \c NULL on
 *         failure.
 */
PUBLIC void *
__driCreateNewScreen_20050727(__DRInativeDisplay * dpy, int scrn,
                              __DRIscreen * psc,
                              const __GLcontextModes * modes,
                              const __DRIversion * ddx_version,
                              const __DRIversion * dri_version,
                              const __DRIversion * drm_version,
                              const __DRIframebuffer * frame_buffer,
                              drmAddress pSAREA, int fd,
                              int internal_api_version,
                              const __DRIinterfaceMethods * interface,
                              __GLcontextModes ** driver_modes)
{
   __DRIscreenPrivate *psp;
   static const __DRIversion ddx_expected = { 1, 7, 0 };
   static const __DRIversion dri_expected = { 4, 0, 0 };
   static const __DRIversion drm_expected = { 1, 7, 0 };

   dri_interface = interface;

   if (!driCheckDriDdxDrmVersions2("i915",
                                   dri_version, &dri_expected,
                                   ddx_version, &ddx_expected,
                                   drm_version, &drm_expected)) {
      return NULL;
   }

   psp = __driUtilCreateNewScreen(dpy, scrn, psc, NULL,
                                  ddx_version, dri_version, drm_version,
                                  frame_buffer, pSAREA, fd,
                                  internal_api_version, &intelAPI);

   if (psp != NULL) {
      I830DRIPtr dri_priv = (I830DRIPtr) psp->pDevPriv;
      *driver_modes = intelFillInModes(dri_priv->cpp * 8,
                                       (dri_priv->cpp == 2) ? 16 : 24,
                                       (dri_priv->cpp == 2) ? 0 : 8, 1);

      /* Calling driInitExtensions here, with a NULL context pointer,
       * does not actually enable the extensions.  It just makes sure
       * that all the dispatch offsets for all the extensions that
       * *might* be enables are known.  This is needed because the
       * dispatch offsets need to be known when _mesa_context_create
       * is called, but we can't enable the extensions until we have a
       * context pointer.
       *
       * Hello chicken.  Hello egg.  How are you two today?
       */
      driInitExtensions(NULL, card_extensions, GL_FALSE);
   }

   return (void *) psp;
}
#endif
@@ -0,0 +1,133 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _INTEL_SCREEN_H_
#define _INTEL_SCREEN_H_
#include "ws_dri_bufpool.h"
#include "pipe/p_compiler.h"
/* NOTE(review): this file-scope declaration looks like a leftover -- it
 * creates a tentative definition of a global in every translation unit
 * that includes this header, and the struct below already carries its own
 * 'device' member.  Confirm it is unused and remove. */
struct egl_drm_device *device;

/** Per-device screen state for the intel EGL/DRM winsys. */
struct intel_screen
{
#if 0
   struct {
      drm_handle_t handle;

      /* We create a static dri buffer for the frontbuffer.
       */
      struct _DriBufferObject *buffer;

      char *map;                /* memory map */
      int offset;               /* from start of video mem, in bytes */
      int pitch;                /* row stride, in bytes */
      int width;
      int height;
      int size;
      int cpp;                  /* for front and back buffers */
   } front;
#endif

   /* DRM framebuffer id of the screen */
   int drmFB;

#if 0
   int deviceID;
   int drmMinor;

   drmI830Sarea *sarea;*/


   /**
    * Configuration cache with default values for all contexts
    */
   driOptionCache optionCache;
#endif

   /* buffer pool for batch buffers */
   struct _DriBufferPool *batchPool;
   struct _DriBufferPool *staticPool; /** for the X screen/framebuffer */
   boolean havePools;           /* pools/managers created? (see intel_create_pools) */

#if 0
   /**
    * Temporary(?) context to use for SwapBuffers or other situations in
    * which we need a rendering context, but none is currently bound.
    */
   struct intel_context *dummyContext;
#endif

   /*
    * New stuff form the i915tex integration
    */
   struct _DriFenceMgr *mgr;            /* fence manager */
   struct _DriFreeSlabManager *fMan;    /* free slab manager for the batch pool */

   unsigned batch_id;

   struct pipe_winsys *winsys;          /* gallium winsys for this device */
   struct egl_drm_device *device;       /* back pointer to the EGL device */

   /* batch buffer used for swap buffers */
   struct intel_batchbuffer *batch;
};
/** cast wrapper */
#if 0
static INLINE struct intel_screen *
intel_screen(__DRIscreenPrivate *sPriv)
{
return (struct intel_screen *) sPriv->private;
}
extern void
intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea);
extern void intelDestroyContext(__DRIcontextPrivate * driContextPriv);
extern boolean intelUnbindContext(__DRIcontextPrivate * driContextPriv);
extern boolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
__DRIdrawablePrivate * driDrawPriv,
__DRIdrawablePrivate * driReadPriv);
extern boolean
intelCreatePools(__DRIscreenPrivate *sPriv);
extern boolean
intelCreateContext(const __GLcontextModes * visual,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate);
#endif
#endif
@@ -0,0 +1,327 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_swapbuffers.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"
#include "intel_winsys.h"
#include "pipe/p_context.h"
#include "state_tracker/st_public.h"
#include "state_tracker/st_context.h"
#include "state_tracker/st_cb_fbo.h"
#include "intel_egl.h"
static void
intel_display_surface(struct egl_drm_drawable *draw,
struct pipe_surface *surf);
/**
 * EGL SwapBuffers entry point: present the drawable's back buffer.
 *
 * Notifies the state tracker before and after the copy so it can flush
 * rendering and flip its notion of front/back.
 */
void intel_swap_buffers(struct egl_drm_drawable *draw)
{
   struct intel_framebuffer *fb = (struct intel_framebuffer *)draw->priv;
   struct pipe_surface *back;

   assert(fb);
   assert(fb->stfb);

   back = st_get_framebuffer_surface(fb->stfb, ST_SURFACE_BACK_LEFT);
   if (!back)
      return;   /* nothing rendered to present */

   st_notify_swapbuffers(fb->stfb);
   intel_display_surface(draw, back);
   st_notify_swapbuffers_complete(fb->stfb);
}
/**
 * Blit the back surface of `draw` to its front buffer using the 2D blitter,
 * then flush the batch and wait for completion (synchronous swap).
 *
 * Assumes a 32bpp front buffer (cpp hard-coded to 4) -- TODO confirm against
 * the real front-buffer format.
 */
static void
intel_display_surface(struct egl_drm_drawable *draw,
                      struct pipe_surface *surf)
{
   struct intel_screen *intel = (struct intel_screen *)draw->device->priv;
   struct intel_framebuffer *intel_fb = (struct intel_framebuffer *)draw->priv;
   struct _DriFenceObject *fence;
   const int srcPitch = surf->pitch;
   const int dstWidth = intel_fb->front->width;
   const int dstHeight = intel_fb->front->height;
   const int dstPitch = intel_fb->front->pitch / 4;  /* presumably bytes -> pixels; verify */
   const int cpp = 4;  /* hard-coded 32 bits per pixel */
   int BR13, CMD;

   /* 0xCC is the GXcopy ROP; bits 24/25 select 32bpp depth. */
   BR13 = (dstPitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
   CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
          XY_SRC_COPY_BLT_WRITE_RGB);

   /* Bug fix: removed an unconditional printf() of the blit parameters;
    * it fired on every swap and spammed stdout in non-debug builds.
    */

   BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(CMD);
   OUT_BATCH(BR13);
   OUT_BATCH((0 << 16) | 0);                  /* dst x1,y1 */
   OUT_BATCH((dstHeight << 16) | dstWidth);   /* dst x2,y2 */
   OUT_RELOC(intel_fb->front_buffer,
             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
             DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
   OUT_BATCH((0 << 16) | 0);                  /* src x1,y1 */
   OUT_BATCH((srcPitch * cpp) & 0xffff);
   OUT_RELOC(dri_bo(surf->buffer),
             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
             DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
   ADVANCE_BATCH();

   fence = intel_batchbuffer_flush(intel->batch);
   driFenceUnReference(&fence);
   /* NOTE(review): finishing here makes the swap fully synchronous --
    * presumably intentional for this bring-up path.
    */
   intel_batchbuffer_finish(intel->batch);
}
#if 0
/**
* Display a colorbuffer surface in an X window.
* Used for SwapBuffers and flushing front buffer rendering.
*
* \param dPriv the window/drawable to display into
* \param surf the surface to display
* \param rect optional subrect of surface to display (may be NULL).
*/
/* NOTE(review): this function lives inside an `#if 0` region (dead code).
 * It references `intel`, whose declaration below is commented out, so it
 * would not compile if re-enabled without restoring that declaration.
 */
void
intelDisplaySurface(__DRIdrawablePrivate *dPriv,
                    struct pipe_surface *surf,
                    const drm_clip_rect_t *rect)
{
   struct intel_screen *intelScreen = intel_screen(dPriv->driScreenPriv);
   //struct intel_context *intel = intelScreen->dummyContext;

   DBG(SWAP, "%s\n", __FUNCTION__);

#if 0
   if (!intel) {
      /* XXX this is where some kind of extra/meta context could be useful */
      return;
   }
#endif

   /* Retire the fence from the previous swap before reusing the slot. */
   if (intel->last_swap_fence) {
      driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, TRUE);
      driFenceUnReference(&intel->last_swap_fence);
      intel->last_swap_fence = NULL;
   }
   intel->last_swap_fence = intel->first_swap_fence;
   intel->first_swap_fence = NULL;

   /* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
    * should work regardless.
    */
   LOCK_HARDWARE(intel);
   /* if this drawable isn't currently bound the LOCK_HARDWARE done on the
    * current context (which is what intelScreenContext should return) might
    * not get a contended lock and thus cliprects not updated (tests/manywin)
    */
   if (intel_context(dPriv->driContextPriv) != intel)
      DRI_VALIDATE_DRAWABLE_INFO(intel->driScreen, dPriv);

   if (dPriv && dPriv->numClipRects) {
      const int srcWidth = surf->width;
      const int srcHeight = surf->height;
      const int nbox = dPriv->numClipRects;
      const drm_clip_rect_t *pbox = dPriv->pClipRects;
      /* front.pitch is in bytes; convert to pixels for the blitter math */
      const int pitch = intelScreen->front.pitch / intelScreen->front.cpp;
      const int cpp = intelScreen->front.cpp;
      const int srcpitch = surf->pitch;
      int BR13, CMD;
      int i;

      ASSERT(surf->buffer);
      ASSERT(surf->cpp == cpp);

      DBG(SWAP, "screen pitch %d src surface pitch %d\n",
          pitch, surf->pitch);

      /* Select blit command/depth bits: 16bpp vs 32bpp (0xCC = GXcopy ROP) */
      if (cpp == 2) {
         BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
         CMD = XY_SRC_COPY_BLT_CMD;
      }
      else {
         BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
         CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
                XY_SRC_COPY_BLT_WRITE_RGB);
      }

      /* Emit one screen-space blit per (clipped) cliprect */
      for (i = 0; i < nbox; i++, pbox++) {
         drm_clip_rect_t box;
         drm_clip_rect_t sbox;

         if (pbox->x1 > pbox->x2 ||
             pbox->y1 > pbox->y2 ||
             pbox->x2 > intelScreen->front.width ||
             pbox->y2 > intelScreen->front.height) {
            /* invalid cliprect, skip it */
            continue;
         }

         box = *pbox;

         if (rect) {
            /* intersect cliprect with user-provided src rect */
            drm_clip_rect_t rrect;

            rrect.x1 = dPriv->x + rect->x1;
            rrect.y1 = (dPriv->h - rect->y1 - rect->y2) + dPriv->y;
            rrect.x2 = rect->x2 + rrect.x1;
            rrect.y2 = rect->y2 + rrect.y1;
            if (rrect.x1 > box.x1)
               box.x1 = rrect.x1;
            if (rrect.y1 > box.y1)
               box.y1 = rrect.y1;
            if (rrect.x2 < box.x2)
               box.x2 = rrect.x2;
            if (rrect.y2 < box.y2)
               box.y2 = rrect.y2;

            if (box.x1 > box.x2 || box.y1 > box.y2)
               continue;
         }

         /* restrict blit to size of actually rendered area */
         if (box.x2 - box.x1 > srcWidth)
            box.x2 = srcWidth + box.x1;
         if (box.y2 - box.y1 > srcHeight)
            box.y2 = srcHeight + box.y1;

         DBG(SWAP, "box x1 x2 y1 y2 %d %d %d %d\n",
             box.x1, box.x2, box.y1, box.y2);

         /* source coordinates are drawable-relative */
         sbox.x1 = box.x1 - dPriv->x;
         sbox.y1 = box.y1 - dPriv->y;

         assert(box.x1 < box.x2);
         assert(box.y1 < box.y2);

         /* XXX this could be done with pipe->surface_copy() */
         BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
         OUT_BATCH(CMD);
         OUT_BATCH(BR13);
         OUT_BATCH((box.y1 << 16) | box.x1);
         OUT_BATCH((box.y2 << 16) | box.x2);

         OUT_RELOC(intelScreen->front.buffer,
                   DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                   DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
         OUT_BATCH((sbox.y1 << 16) | sbox.x1);
         OUT_BATCH((srcpitch * cpp) & 0xffff);
         OUT_RELOC(dri_bo(surf->buffer),
                   DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                   DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
         ADVANCE_BATCH();
      }

      if (intel->first_swap_fence)
         driFenceUnReference(&intel->first_swap_fence);
      intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
   }

   UNLOCK_HARDWARE(intel);

   /* Pick up window moves/resizes that happened since the last swap */
   if (intel->lastStamp != dPriv->lastStamp) {
      intelUpdateWindowSize(dPriv);
      intel->lastStamp = dPriv->lastStamp;
   }
}
/**
* This will be called whenever the currently bound window is moved/resized.
*/
/* (compiled out by the surrounding `#if 0`)
 * Propagate the drawable's current size to the state tracker framebuffer.
 */
void
intelUpdateWindowSize(__DRIdrawablePrivate *dPriv)
{
   struct intel_framebuffer *intelfb = intel_framebuffer(dPriv);
   assert(intelfb->stfb);
   st_resize_framebuffer(intelfb->stfb, dPriv->w, dPriv->h);
}
/* (compiled out by the surrounding `#if 0`)
 * DRI SwapBuffers: present the back-left surface to the window.
 */
void
intelSwapBuffers(__DRIdrawablePrivate * dPriv)
{
   struct intel_framebuffer *intel_fb = intel_framebuffer(dPriv);
   struct pipe_surface *back_surf;

   assert(intel_fb);
   assert(intel_fb->stfb);

   back_surf = st_get_framebuffer_surface(intel_fb->stfb,
                                          ST_SURFACE_BACK_LEFT);
   if (back_surf) {
      /* flush pending rendering, copy, then let the tracker flip */
      st_notify_swapbuffers(intel_fb->stfb);
      intelDisplaySurface(dPriv, back_surf, NULL);
      st_notify_swapbuffers_complete(intel_fb->stfb);
   }
}
/**
* Called via glXCopySubBufferMESA() to copy a subrect of the back
* buffer to the front buffer/screen.
*/
/* (compiled out by the surrounding `#if 0`)
 * glXCopySubBufferMESA: copy a subrect of the back buffer to the front.
 * NOTE(review): rect.x2/y2 hold width/height here, not corner coordinates;
 * intelDisplaySurface treats them that way when building rrect -- confirm.
 */
void
intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h)
{
   struct intel_framebuffer *intel_fb = intel_framebuffer(dPriv);
   struct pipe_surface *back_surf;

   assert(intel_fb);
   assert(intel_fb->stfb);

   back_surf = st_get_framebuffer_surface(intel_fb->stfb,
                                          ST_SURFACE_BACK_LEFT);
   if (back_surf) {
      drm_clip_rect_t rect;
      rect.x1 = x;
      rect.y1 = y;
      rect.x2 = w;
      rect.y2 = h;

      st_notify_swapbuffers(intel_fb->stfb);
      intelDisplaySurface(dPriv, back_surf, &rect);
   }
}
#endif
@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,13 +25,23 @@
*
**************************************************************************/
/* NOTE(review): merge residue -- the old intel_batchpool.h guard and the
 * driBatchPoolInit declaration are interleaved with the new
 * intel_swapbuffers.h guard. The #endif comments below do not match the
 * actual nesting (the first #endif closes INTEL_SWAPBUFFERS_H, the last one
 * closes INTEL_BATCHPOOL_H). Structurally balanced, but should be cleaned
 * up into a single INTEL_SWAPBUFFERS_H guard.
 */
#ifndef INTEL_BATCHPOOL_H
#define INTEL_BATCHPOOL_H

/* leftover declaration from the removed batch-pool header */
extern struct _DriBufferPool *driBatchPoolInit(int fd, unsigned flags,
                                               unsigned long bufSize,
                                               unsigned numBufs,
                                               unsigned checkDelayed);

#ifndef INTEL_SWAPBUFFERS_H
#define INTEL_SWAPBUFFERS_H

#endif /* INTEL_BATCHPOOL_H */

struct pipe_surface;

/* Prototypes disabled along with their (also #if 0'd) definitions. */
#if 0
extern void intelDisplaySurface(__DRIdrawablePrivate * dPriv,
                                struct pipe_surface *surf,
                                const drm_clip_rect_t * rect);
extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
extern void intelCopySubBuffer(__DRIdrawablePrivate * dPriv,
                               int x, int y, int w, int h);
extern void intelUpdateWindowSize(__DRIdrawablePrivate *dPriv);
#endif

#endif /* INTEL_SWAPBUFFERS_H */
@@ -0,0 +1,73 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_WINSYS_H
#define INTEL_WINSYS_H
#include "pipe/p_state.h"
struct intel_context;
struct pipe_context;
struct pipe_winsys;
struct pipe_buffer;
struct _DriBufferObject;
struct pipe_winsys *
intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan );
void
intel_destroy_pipe_winsys( struct pipe_winsys *winsys );
struct pipe_context *
intel_create_softpipe( struct intel_context *intel,
struct pipe_winsys *winsys );
struct pipe_context *
intel_create_i915simple( struct intel_context *intel,
struct pipe_winsys *winsys );
/** Winsys wrapper: a gallium pipe_buffer backed by a DRI buffer object. */
struct intel_buffer {
   struct pipe_buffer base;         /* must be first: downcast target */
   struct _DriBufferObject *driBO;  /* underlying TTM buffer object */
};

/** Downcast a pipe_buffer created by this winsys to an intel_buffer. */
static INLINE struct intel_buffer *
intel_buffer( struct pipe_buffer *buf )
{
   return (struct intel_buffer *)buf;
}

/** Fetch the DRI buffer object behind a winsys-created pipe_buffer. */
static INLINE struct _DriBufferObject *
dri_bo( struct pipe_buffer *buf )
{
   return intel_buffer(buf)->driBO;
}
#endif
@@ -0,0 +1,184 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include <stdlib.h>
#include <xf86drm.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_winsys.h"
#include "pipe/p_util.h"
#include "pipe/p_winsys.h"
#include "i915simple/i915_winsys.h"
#include "i915simple/i915_screen.h"
/** i915simple winsys implementation bound to an intel_context. */
struct intel_i915_winsys {
   struct i915_winsys winsys;   /**< batch buffer funcs; must be first */
   struct pipe_winsys *pws;     /* underlying pipe winsys */
   struct intel_context *intel; /* context owning the batchbuffer */
};

/* Turn a i915simple winsys into an intel/i915simple winsys:
 */
static inline struct intel_i915_winsys *
intel_i915_winsys( struct i915_winsys *sws )
{
   return (struct intel_i915_winsys *)sws;
}
/* Simple batchbuffer interface:
*/
/* i915_winsys::batch_start: return a pointer into the batchbuffer if at
 * least `dwords` DWORDs (4 bytes each) of space remain, else NULL.
 * The `relocs` count is currently ignored (see XXX below).
 */
static unsigned *intel_i915_batch_start( struct i915_winsys *sws,
                                         unsigned dwords,
                                         unsigned relocs )
{
   struct intel_context *intel = intel_i915_winsys(sws)->intel;

   /* XXX: check relocs.
    */
   if (intel_batchbuffer_space( intel->batch ) >= dwords * 4) {
      /* XXX: Hmm, the driver can't really do much with this pointer:
       */
      return (unsigned *)intel->batch->ptr;
   }
   else
      return NULL;
}
/* i915_winsys::batch_dword: append one DWORD to the batchbuffer. */
static void intel_i915_batch_dword( struct i915_winsys *sws,
                                    unsigned dword )
{
   struct intel_context *intel = intel_i915_winsys(sws)->intel;
   intel_batchbuffer_emit_dword( intel->batch, dword );
}
/* i915_winsys::batch_reloc: record a relocation for `buf` at the current
 * batch offset, translating i915 access flags to DRM BO flags.
 * (Cleanup: removed the dead `#if 0` branch that called the old
 * intel_batchbuffer_emit_reloc() API.)
 */
static void intel_i915_batch_reloc( struct i915_winsys *sws,
                                    struct pipe_buffer *buf,
                                    unsigned access_flags,
                                    unsigned delta )
{
   struct intel_context *intel = intel_i915_winsys(sws)->intel;
   unsigned flags = DRM_BO_FLAG_MEM_TT;
   unsigned mask = DRM_BO_MASK_MEM;

   if (access_flags & I915_BUFFER_ACCESS_WRITE) {
      flags |= DRM_BO_FLAG_WRITE;
      mask |= DRM_BO_FLAG_WRITE;
   }

   if (access_flags & I915_BUFFER_ACCESS_READ) {
      flags |= DRM_BO_FLAG_READ;
      mask |= DRM_BO_FLAG_READ;
   }

   intel_offset_relocation( intel->batch,
                            delta,
                            dri_bo( buf ),
                            flags,
                            mask );
}
/* i915_winsys::batch_flush: flush the batchbuffer and optionally return a
 * fence for it. If `fence` is non-NULL it must point to a NULL handle; on
 * success it receives the flush fence, otherwise the fence is dropped.
 */
static void intel_i915_batch_flush( struct i915_winsys *sws,
                                    struct pipe_fence_handle **fence )
{
   struct intel_i915_winsys *iws = intel_i915_winsys(sws);
   struct intel_context *intel = iws->intel;
   /* _DriFenceObject and pipe_fence_handle are the same object here;
    * the union avoids a cast between the two pointer types.
    */
   union {
      struct _DriFenceObject *dri;
      struct pipe_fence_handle *pipe;
   } fu;

   if (fence)
      assert(!*fence);

   fu.dri = intel_batchbuffer_flush( intel->batch );

   if (!fu.dri) {
      /* Flush failed -- should not happen. */
      assert(0);
      if (fence)          /* bug fix: original wrote *fence without checking fence */
         *fence = NULL;
      return;
   }

   /* fu.dri is known non-NULL past the early return above */
   if (fence)
      *fence = fu.pipe;
   else
      driFenceUnReference(&fu.dri);
}
/**
* Create i915 hardware rendering context.
*/
struct pipe_context *
intel_create_i915simple( struct intel_context *intel,
struct pipe_winsys *winsys )
{
struct intel_i915_winsys *iws = CALLOC_STRUCT( intel_i915_winsys );
struct pipe_screen *screen;
/* Fill in this struct with callbacks that i915simple will need to
* communicate with the window system, buffer manager, etc.
*/
iws->winsys.batch_start = intel_i915_batch_start;
iws->winsys.batch_dword = intel_i915_batch_dword;
iws->winsys.batch_reloc = intel_i915_batch_reloc;
iws->winsys.batch_flush = intel_i915_batch_flush;
iws->pws = winsys;
iws->intel = intel;
screen = i915_create_screen(winsys, PCI_CHIP_I945_GM);
assert(screen);
/* Create the i915simple context:
*/
return i915_create_context( screen,
winsys,
&iws->winsys );
}
@@ -0,0 +1,338 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include <stdlib.h>
#include <xf86drm.h>
//#include "dri_bufpool.h"
//#include "dri_bufmgr.h"
#include "intel_context.h"
#include "intel_winsys.h"
#include "intel_swapbuffers.h"
#include "intel_batchbuffer.h"
#include "pipe/p_winsys.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_util.h"
#include "pipe/p_inlines.h"
/** pipe_winsys implementation backed by DRI/TTM buffer pools. */
struct intel_pipe_winsys {
   struct pipe_winsys winsys;          /* must be first: downcast target */
   struct _DriBufferPool *regionPool;  /* pool all pipe buffers come from */
   struct _DriFreeSlabManager *fMan;   /* slab manager shared with caller */
};

/* Turn a pipe winsys into an intel/pipe winsys:
 */
static inline struct intel_pipe_winsys *
intel_pipe_winsys( struct pipe_winsys *winsys )
{
   return (struct intel_pipe_winsys *)winsys;
}
/* Most callbacks map direcly onto dri_bufmgr operations:
*/
/* pipe_winsys::buffer_map: map the underlying DRI buffer object into the
 * CPU address space, translating pipe usage flags to DRM access flags.
 */
static void *intel_buffer_map(struct pipe_winsys *winsys,
                              struct pipe_buffer *buf,
                              unsigned flags )
{
   unsigned bo_flags = 0;

   if (flags & PIPE_BUFFER_USAGE_CPU_WRITE)
      bo_flags |= DRM_BO_FLAG_WRITE;
   if (flags & PIPE_BUFFER_USAGE_CPU_READ)
      bo_flags |= DRM_BO_FLAG_READ;

   return driBOMap( dri_bo(buf), bo_flags, 0 );
}
/* pipe_winsys::buffer_unmap: undo a previous intel_buffer_map(). */
static void intel_buffer_unmap(struct pipe_winsys *winsys,
                               struct pipe_buffer *buf)
{
   driBOUnmap( dri_bo(buf) );
}
/* pipe_winsys::buffer_destroy: drop the DRI BO reference and free the
 * wrapper struct (caller guarantees refcount has reached zero).
 */
static void
intel_buffer_destroy(struct pipe_winsys *winsys,
                     struct pipe_buffer *buf)
{
   driBOUnReference( dri_bo(buf) );
   FREE(buf);
}
/* Pipe has no concept of pools. We choose the tex/region pool
* for all buffers.
* Grabs the hardware lock!
*/
/* Pipe has no concept of pools. We choose the tex/region pool
 * for all buffers.
 * Grabs the hardware lock!
 *
 * pipe_winsys::buffer_create: allocate a pipe_buffer backed by a DRI BO
 * from the region pool. Returns NULL on allocation failure.
 */
static struct pipe_buffer *
intel_buffer_create(struct pipe_winsys *winsys,
                    unsigned alignment,
                    unsigned usage,
                    unsigned size )
{
   struct intel_buffer *buffer = CALLOC_STRUCT( intel_buffer );
   struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
   unsigned flags = 0;

   if (!buffer)
      return NULL;   /* bug fix: original dereferenced a NULL allocation */

   buffer->base.refcount = 1;
   buffer->base.alignment = alignment;
   buffer->base.usage = usage;
   buffer->base.size = size;

   /* Vertex data stays in cached local memory; everything else may live
    * in VRAM or the aperture.
    */
   if (usage & (PIPE_BUFFER_USAGE_VERTEX /*| IWS_BUFFER_USAGE_LOCAL*/)) {
      flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
   } else {
      flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
   }

   if (usage & PIPE_BUFFER_USAGE_GPU_READ)
      flags |= DRM_BO_FLAG_READ;

   if (usage & PIPE_BUFFER_USAGE_GPU_WRITE)
      flags |= DRM_BO_FLAG_WRITE;

   /* drm complains if we don't set any read/write flags.
    */
   if ((flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) == 0)
      flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;

   driGenBuffers( iws->regionPool,
                  "pipe buffer", 1, &buffer->driBO, alignment, flags, 0 );

   driBOData( buffer->driBO, size, NULL, iws->regionPool, 0 );

   return &buffer->base;
}
static struct pipe_buffer *
intel_user_buffer_create(struct pipe_winsys *winsys, void *ptr, unsigned bytes)
{
struct intel_buffer *buffer = CALLOC_STRUCT( intel_buffer );
struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
driGenUserBuffer( iws->regionPool,
"pipe user buffer", &buffer->driBO, ptr, bytes );
buffer->base.refcount = 1;
return &buffer->base;
}
/* The state tracker (should!) keep track of whether the fake
* frontbuffer has been touched by any rendering since the last time
* we copied its contents to the real frontbuffer. Our task is easy:
*/
/* pipe_winsys::flush_frontbuffer -- currently a no-op: the DRI-based
 * display path is compiled out below. Parameters are accepted but unused.
 */
static void
intel_flush_frontbuffer( struct pipe_winsys *winsys,
                         struct pipe_surface *surf,
                         void *context_private)
{
#if 0
   struct intel_context *intel = (struct intel_context *) context_private;
   __DRIdrawablePrivate *dPriv = intel->driDrawable;

   intelDisplaySurface(dPriv, surf, NULL);
#endif
}
/* pipe_winsys::surface_alloc: allocate an empty surface (no storage yet;
 * see intel_i915_surface_alloc_storage). Returns NULL on alloc failure.
 */
static struct pipe_surface *
intel_i915_surface_alloc(struct pipe_winsys *winsys)
{
   struct pipe_surface *surf = CALLOC_STRUCT(pipe_surface);

   if (surf) {
      surf->refcount = 1;
      surf->winsys = winsys;
   }

   return surf;
}
/**
 * Round `n` up to the next multiple of `multiple`.
 * `multiple` must be a power of two for the mask trick to be valid.
 */
static INLINE unsigned
round_up(unsigned n, unsigned multiple)
{
   const unsigned mask = multiple - 1;
   return (n + mask) & ~mask;
}
/**
* Copied from xm_winsys.c
*/
/* pipe_winsys::surface_alloc_storage: size the surface and allocate its
 * backing buffer. Pitch is rounded so each row starts on a 64-byte
 * boundary. Returns 0 on success, -1 if the buffer allocation fails.
 * NOTE(review): `pitch * cpp * height` can overflow for very large
 * dimensions -- no guard here.
 */
static int
intel_i915_surface_alloc_storage(struct pipe_winsys *winsys,
                                 struct pipe_surface *surf,
                                 unsigned width, unsigned height,
                                 enum pipe_format format,
                                 unsigned flags)
{
   const unsigned alignment = 64;
   //int ret;

   surf->width = width;
   surf->height = height;
   surf->format = format;
   surf->cpp = pf_get_size(format);
   /* pitch is in pixels: round the row up to `alignment` bytes */
   surf->pitch = round_up(width, alignment / surf->cpp);

   assert(!surf->buffer);
   surf->buffer = winsys->buffer_create(winsys, alignment,
                                        PIPE_BUFFER_USAGE_PIXEL,
                                        surf->pitch * surf->cpp * height);
   if(!surf->buffer)
      return -1;

   return 0;
}
/* pipe_winsys::surface_release: drop one reference to *s and NULL the
 * caller's pointer; frees the surface and its buffer at refcount zero.
 * NOTE(review): frees with free() while allocation used CALLOC_STRUCT --
 * works only while CALLOC_STRUCT maps to plain calloc; confirm.
 */
static void
intel_i915_surface_release(struct pipe_winsys *winsys, struct pipe_surface **s)
{
   struct pipe_surface *surf = *s;
   surf->refcount--;
   if (surf->refcount == 0) {
      if (surf->buffer)
         pipe_buffer_reference(winsys, &surf->buffer, NULL);
      free(surf);
   }
   *s = NULL;
}
/* pipe_winsys::get_name: identify this winsys implementation. */
static const char *
intel_get_name( struct pipe_winsys *winsys )
{
   (void) winsys;  /* unused */
   return "Intel/EGL/ttm";
}
/* pipe_winsys::fence_reference: make *ptr reference `fence`, releasing any
 * fence previously held. pipe_fence_handle is a _DriFenceObject in this
 * winsys, hence the casts.
 */
static void
intel_fence_reference( struct pipe_winsys *sws,
                       struct pipe_fence_handle **ptr,
                       struct pipe_fence_handle *fence )
{
   if (*ptr)
      driFenceUnReference((struct _DriFenceObject **)ptr);

   if (fence)
      *ptr = (struct pipe_fence_handle *)driFenceReference((struct _DriFenceObject *)fence);
}
/* pipe_winsys::fence_signalled: non-blocking check whether the fence has
 * signalled for the given flag bits.
 */
static int
intel_fence_signalled( struct pipe_winsys *sws,
                       struct pipe_fence_handle *fence,
                       unsigned flag )
{
   return driFenceSignaled((struct _DriFenceObject *)fence, flag);
}
/* pipe_winsys::fence_finish: block until the fence signals for `flag`.
 * Always uses a lazy (polling) wait -- see comment below.
 */
static int
intel_fence_finish( struct pipe_winsys *sws,
                    struct pipe_fence_handle *fence,
                    unsigned flag )
{
   /* JB: Lets allways lazy wait */
   return driFenceFinish((struct _DriFenceObject *)fence, flag, 1);
}
struct pipe_winsys *
intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan )
{
struct intel_pipe_winsys *iws = CALLOC_STRUCT( intel_pipe_winsys );
/* Fill in this struct with callbacks that pipe will need to
* communicate with the window system, buffer manager, etc.
*
* Pipe would be happy with a malloc based memory manager, but
* the SwapBuffers implementation in this winsys driver requires
* that rendering be done to an appropriate _DriBufferObject.
*/
iws->winsys.buffer_create = intel_buffer_create;
iws->winsys.user_buffer_create = intel_user_buffer_create;
iws->winsys.buffer_map = intel_buffer_map;
iws->winsys.buffer_unmap = intel_buffer_unmap;
iws->winsys.buffer_destroy = intel_buffer_destroy;
iws->winsys.flush_frontbuffer = intel_flush_frontbuffer;
iws->winsys.get_name = intel_get_name;
iws->winsys.surface_alloc = intel_i915_surface_alloc;
iws->winsys.surface_alloc_storage = intel_i915_surface_alloc_storage;
iws->winsys.surface_release = intel_i915_surface_release;
iws->winsys.fence_reference = intel_fence_reference;
iws->winsys.fence_signalled = intel_fence_signalled;
iws->winsys.fence_finish = intel_fence_finish;
if (fd)
iws->regionPool = driSlabPoolInit(fd,
DRM_BO_FLAG_READ |
DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_TT,
DRM_BO_FLAG_READ |
DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_TT,
64, 6, 16, 4096, 0,
fMan);
return &iws->winsys;
}
/* Tear down a winsys created by intel_create_pipe_winsys().
 * NOTE(review): frees with free() while allocation used CALLOC_STRUCT --
 * works only while CALLOC_STRUCT maps to plain calloc; confirm.
 */
void
intel_destroy_pipe_winsys( struct pipe_winsys *winsys )
{
   struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
   if (iws->regionPool) {
      driPoolTakeDown(iws->regionPool);
   }
   free(iws);
}
@@ -0,0 +1,82 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include "intel_context.h"
#include "intel_winsys.h"
#include "pipe/p_defines.h"
#include "pipe/p_util.h"
#include "pipe/p_format.h"
#include "softpipe/sp_winsys.h"
/** softpipe_winsys implementation bound to an intel_context. */
struct intel_softpipe_winsys {
   struct softpipe_winsys sws;   /* must be first: downcast target */
   struct intel_context *intel;  /* owning context */
};
/**
 * softpipe_winsys::is_format_supported -- report which surface formats
 * this winsys can render to.
 */
static boolean
intel_is_format_supported(struct softpipe_winsys *sws,
                          enum pipe_format format)
{
   /* Exactly three formats are supported; everything else is rejected. */
   if (format == PIPE_FORMAT_A8R8G8B8_UNORM ||
       format == PIPE_FORMAT_R5G6B5_UNORM ||
       format == PIPE_FORMAT_S8Z24_UNORM)
      return TRUE;

   return FALSE;
}
/**
* Create rendering context which uses software rendering.
*/
struct pipe_context *
intel_create_softpipe( struct intel_context *intel,
struct pipe_winsys *winsys )
{
struct intel_softpipe_winsys *isws = CALLOC_STRUCT( intel_softpipe_winsys );
struct pipe_screen *screen = softpipe_create_screen(winsys);
/* Fill in this struct with callbacks that softpipe will need to
* communicate with the window system, buffer manager, etc.
*/
isws->sws.is_format_supported = intel_is_format_supported;
isws->intel = intel;
/* Create the softpipe context:
*/
return softpipe_create( screen, winsys, &isws->sws );
}
@@ -0,0 +1,953 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include "glthread.h"
#include "errno.h"
#include "ws_dri_bufmgr.h"
#include "string.h"
#include "imports.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
/*
* This lock is here to protect drmBO structs changing underneath us during a
* validate list call, since validatelist cannot take individiual locks for
* each drmBO. Validatelist takes this lock in write mode. Any access to an
* individual drmBO should take this lock in read mode, since in that case, the
* driBufferObject mutex will protect the access. Locking order is
* driBufferObject mutex - > this rw lock.
*/
_glthread_DECLARE_STATIC_MUTEX(bmMutex);
_glthread_DECLARE_STATIC_COND(bmCond);
static int kernelReaders = 0;
static int num_buffers = 0;
static int num_user_buffers = 0;
/* Return the drmBO stored at a list position previously obtained from
 * drmBOListIterator()/drmBOListNext().
 */
static drmBO *drmBOListBuf(void *iterator)
{
   drmBONode *node;
   drmMMListHead *l = (drmMMListHead *) iterator;
   node = DRMLISTENTRY(drmBONode, l, head);
   return node->buf;
}
/* Begin iterating `list`; returns an opaque position, or NULL if empty. */
static void *drmBOListIterator(drmBOList *list)
{
   void *ret = list->list.next;

   if (ret == &list->list)
      return NULL;   /* list head reached: empty list */
   return ret;
}
/* Advance an iterator from drmBOListIterator(); NULL at end of list. */
static void *drmBOListNext(drmBOList *list, void *iterator)
{
   void *ret;

   drmMMListHead *l = (drmMMListHead *) iterator;
   ret = l->next;
   if (ret == &list->list)
      return NULL;   /* wrapped back to the head: done */
   return ret;
}
/* Append `item` with its two argument words to `list`, reusing a node from
 * the free list when possible and allocating otherwise.
 * Returns the node, or NULL on allocation failure.
 */
static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
                                 uint64_t arg0,
                                 uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l;

   l = list->free.next;
   if (l == &list->free) {
      /* free list empty: grow */
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   }
   else {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   }
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADD(&node->head, &list->list);
   list->numOnList++;
   return node;
}
/* Add `buf` to the validate list with the requested placement flags/mask,
 * or merge the flags into an existing entry for the same buffer.
 * *newItem is set to 1 only when a new node was created.
 * Returns 0, -ENOMEM on allocation failure, or -EINVAL when the merged
 * flags conflict (incompatible access bits, or no memory domain left).
 */
static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
                              uint64_t mask, int *newItem)
{
   drmBONode *node, *cur;
   drmMMListHead *l;

   *newItem = 0;
   cur = NULL;

   /* linear scan for an existing entry for this buffer */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      if (node->buf == buf) {
         cur = node;
         break;
      }
   }
   if (!cur) {
      cur = drmAddListItem(list, buf, flags, mask);
      if (!cur) {
         return -ENOMEM;
      }
      *newItem = 1;
      cur->arg0 = flags;
      cur->arg1 = mask;
   }
   else {
      /* merge: memory domains intersect, access flags union */
      uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
      uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;

      /* both requests care about a non-memory bit but disagree on it */
      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }

      cur->arg1 |= mask;
      cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);

      /* a memory domain was requested but the intersection is empty */
      if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
          (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
         return -EINVAL;
      }
   }
   return 0;
}
/* Release every node on both the active and free lists of `list`. */
static void drmBOFreeList(drmBOList *list)
{
   drmBONode *node;
   drmMMListHead *l;

   /* drain the active list */
   l = list->list.next;
   while(l != &list->list) {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
      free(node);
      l = list->list.next;
      list->numCurrent--;
      list->numOnList--;
   }
   /* drain the free list */
   l = list->free.next;
   while(l != &list->free) {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
      free(node);
      l = list->free.next;
      list->numCurrent--;
   }
}
/* Grow or shrink the node pool toward list->numTarget. Growth allocates
 * onto the free list; shrinking only releases free (unused) nodes.
 * Returns 0, or -ENOMEM if growth could not complete.
 */
static int drmAdjustListNodes(drmBOList *list)
{
   drmBONode *node;
   drmMMListHead *l;
   int ret = 0;

   while(list->numCurrent < list->numTarget) {
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         ret = -ENOMEM;
         break;
      }
      list->numCurrent++;
      DRMLISTADD(&node->head, &list->free);
   }

   while(list->numCurrent > list->numTarget) {
      l = list->free.next;
      if (l == &list->free)
         break;   /* remaining nodes are in use; cannot shrink further */
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
      free(node);
      list->numCurrent--;
   }
   return ret;
}
/* Initialize `list` and pre-allocate `numTarget` free nodes.
 * Returns 0 or -ENOMEM.
 */
static int drmBOCreateList(int numTarget, drmBOList *list)
{
   DRMINITLISTHEAD(&list->list);
   DRMINITLISTHEAD(&list->free);
   list->numTarget = numTarget;
   list->numCurrent = 0;
   list->numOnList = 0;
   return drmAdjustListNodes(list);
}
/* Move all active nodes back to the free list and re-balance the pool.
 * Returns 0 or -ENOMEM from drmAdjustListNodes().
 */
static int drmBOResetList(drmBOList *list)
{
   drmMMListHead *l;
   int ret;

   ret = drmAdjustListNodes(list);
   if (ret)
      return ret;

   l = list->list.next;
   while (l != &list->list) {
      DRMLISTDEL(l);
      DRMLISTADD(l, &list->free);
      list->numOnList--;
      l = list->list.next;
   }
   return drmAdjustListNodes(list);
}
/*
 * Acquire the kernel-BO lock for writing: wait until all readers have
 * drained, then return with bmMutex still held.  Paired with
 * driWriteUnlockKernelBO(), which releases the mutex.
 */
void driWriteLockKernelBO(void)
{
   _glthread_LOCK_MUTEX(bmMutex);
   while(kernelReaders != 0)
      _glthread_COND_WAIT(bmCond, bmMutex);
}
/* Release the write-side hold on bmMutex taken by driWriteLockKernelBO(). */
void driWriteUnlockKernelBO(void)
{
   _glthread_UNLOCK_MUTEX(bmMutex);
}
/*
 * Register as a reader of kernel BO state.  Readers only bump a counter
 * under bmMutex; a writer waits until the counter drops to zero.
 */
void driReadLockKernelBO(void)
{
   _glthread_LOCK_MUTEX(bmMutex);
   kernelReaders++;
   _glthread_UNLOCK_MUTEX(bmMutex);
}
/*
 * Drop a reader registration; wake any waiting writer when the last
 * reader leaves.
 */
void driReadUnlockKernelBO(void)
{
   _glthread_LOCK_MUTEX(bmMutex);
   if (--kernelReaders == 0)
      _glthread_COND_BROADCAST(bmCond);
   _glthread_UNLOCK_MUTEX(bmMutex);
}
/*
* TODO: Introduce fence pools in the same way as
* buffer object pools.
*/
/* A reference-counted buffer object backed either by a buffer pool or
 * by caller-owned user memory. */
typedef struct _DriBufferObject
{
   DriBufferPool *pool;          /* pool the storage comes from */
   _glthread_Mutex mutex;        /* serializes access to this object */
   int refCount;                 /* destroyed when it reaches 0 */
   const char *name;             /* debug name supplied at creation */
   uint64_t flags;               /* current DRM_BO_FLAG_* flags */
   unsigned hint;                /* creation hint bits */
   unsigned alignment;           /* alignment passed to pool->create() */
   unsigned createdByReference;  /* nonzero: obtained via pool->reference() */
   void *private;                /* pool-private handle (e.g. a drmBO) */
   /* user-space buffer: */
   unsigned userBuffer;          /* nonzero: wraps user memory, no pool storage */
   void *userData;               /* user-supplied pointer (userBuffer only) */
   unsigned userSize;            /* size in bytes of userData */
} DriBufferObject;
/* A pair of parallel validation lists: the kernel drmBOs and the
 * user-space DriBufferObjects that wrap them. */
typedef struct _DriBufferList {
   drmBOList drmBuffers;   /* List of kernel buffers needing validation */
   drmBOList driBuffers;   /* List of user-space buffers needing validation */
} DriBufferList;
/*
 * Report a fatal buffer-manager error and terminate the process.
 * val is a negative errno value; file/function/line identify the call
 * site (normally supplied by the BM_CKFATAL() macro).  Never returns.
 */
void
bmError(int val, const char *file, const char *function, int line)
{
   _mesa_printf("Fatal video memory manager error \"%s\".\n"
                "Check kernel logs or set the LIBGL_DEBUG\n"
                "environment variable to \"verbose\" for more info.\n"
                "Detected in file %s, line %d, function %s.\n",
                strerror(-val), file, line, function);
   /* The original wrapped abort() in "#ifndef NDEBUG ... #else ... #endif"
    * with abort() on both branches -- the conditional was dead code. */
   abort();
}
/*
 * Return the kernel drmBO backing this buffer object.  Fatal if the
 * pool reports no kernel BO.
 */
extern drmBO *
driBOKernel(struct _DriBufferObject *buf)
{
   drmBO *kernelBO;

   driReadLockKernelBO();
   _glthread_LOCK_MUTEX(buf->mutex);
   assert(buf->private != NULL);

   kernelBO = buf->pool->kernel(buf->pool, buf->private);
   if (!kernelBO)
      BM_CKFATAL(-EINVAL);

   _glthread_UNLOCK_MUTEX(buf->mutex);
   driReadUnlockKernelBO();

   return kernelBO;
}
/*
 * Block until the pool reports this buffer idle.  With lazy != 0 the
 * pool's waitIdle callback is asked for its lazier wait variant.
 */
void
driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
{
   /*
    * This function may block. Is it sane to keep the mutex held during
    * that time??
    */
   _glthread_LOCK_MUTEX(buf->mutex);
   BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Map the buffer into CPU-addressable memory and return the address,
 * or NULL if the pool map fails.  User buffers return their user
 * pointer directly, with no pool involvement.
 */
void *
driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
{
   void *address;
   int err;

   if (buf->userBuffer)
      return buf->userData;

   _glthread_LOCK_MUTEX(buf->mutex);
   assert(buf->private != NULL);
   err = buf->pool->map(buf->pool, buf->private, flags, hint,
                        &buf->mutex, &address);
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return (err == 0) ? address : NULL;
}
/*
 * Undo a driBOMap().  No-op for user buffers, which were never mapped
 * through a pool.
 */
void
driBOUnmap(struct _DriBufferObject *buf)
{
   if (buf->userBuffer)
      return;

   assert(buf->private != NULL);

   _glthread_LOCK_MUTEX(buf->mutex);
   BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Return the buffer's offset as reported by its pool, under the object
 * mutex.
 */
unsigned long
driBOOffset(struct _DriBufferObject *buf)
{
   unsigned long ret;

   assert(buf->private != NULL);

   _glthread_LOCK_MUTEX(buf->mutex);
   ret = buf->pool->offset(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);
   return ret;
}
/*
 * Return the buffer's offset within its pool's backing storage, under
 * the object mutex.
 */
unsigned long
driBOPoolOffset(struct _DriBufferObject *buf)
{
   unsigned long ret;

   assert(buf->private != NULL);

   _glthread_LOCK_MUTEX(buf->mutex);
   ret = buf->pool->poolOffset(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);
   return ret;
}
/*
 * Return the buffer's current flags as reported by its pool.  Takes the
 * kernel-BO read lock as well as the object mutex, since the pool may
 * consult kernel BO state.
 */
uint64_t
driBOFlags(struct _DriBufferObject *buf)
{
   uint64_t ret;

   assert(buf->private != NULL);

   driReadLockKernelBO();
   _glthread_LOCK_MUTEX(buf->mutex);
   ret = buf->pool->flags(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);
   driReadUnlockKernelBO();
   return ret;
}
/*
 * Take an additional reference on buf and return it.
 * If the increment lands on 1 the caller was referencing an
 * already-dead object (refCount had reached 0) -- treated as fatal.
 */
struct _DriBufferObject *
driBOReference(struct _DriBufferObject *buf)
{
   _glthread_LOCK_MUTEX(buf->mutex);
   if (++buf->refCount == 1) {
      /* Unlock before aborting so the error path doesn't hold the mutex. */
      _glthread_UNLOCK_MUTEX(buf->mutex);
      BM_CKFATAL(-EINVAL);
   }
   _glthread_UNLOCK_MUTEX(buf->mutex);
   return buf;
}
/*
 * Drop a reference; destroys the object when the count reaches zero.
 * NULL is tolerated.  Note the mutex is released BEFORE the pool
 * destroy/unreference call -- nobody else can hold a reference at that
 * point, and the pool callbacks may block.
 */
void
driBOUnReference(struct _DriBufferObject *buf)
{
   int tmp;

   if (!buf)
      return;

   _glthread_LOCK_MUTEX(buf->mutex);
   tmp = --buf->refCount;
   if (!tmp) {
      _glthread_UNLOCK_MUTEX(buf->mutex);
      if (buf->private) {
         /* Referenced buffers are returned to the pool differently from
          * ones we created ourselves. */
         if (buf->createdByReference)
            buf->pool->unreference(buf->pool, buf->private);
         else
            buf->pool->destroy(buf->pool, buf->private);
      }
      /* Keep the global buffer statistics in step. */
      if (buf->userBuffer)
         num_user_buffers--;
      else
         num_buffers--;
      free(buf);
   } else
      _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * (Re)allocate backing storage of at least `size` bytes for buf and,
 * when `data` is non-NULL, copy `size` bytes into it.
 *
 * newPool == NULL keeps the current pool; flags == 0 keeps the current
 * flags.  A new backing buffer is created when there is none yet, when
 * the pool changes, or when the existing one is too small.  Returns 0,
 * or -ENOMEM / a pool map error code.
 *
 * NOTE(review): must not be called on user buffers (asserted below).
 */
int
driBOData(struct _DriBufferObject *buf,
          unsigned size, const void *data,
          DriBufferPool *newPool,
          uint64_t flags)
{
   void *virtual = NULL;
   int newBuffer;
   int retval = 0;
   struct _DriBufferPool *pool;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   _glthread_LOCK_MUTEX(buf->mutex);
   pool = buf->pool;

   /* Adopt newPool if the buffer has none yet; otherwise default
    * newPool to the current pool. */
   if (pool == NULL && newPool != NULL) {
      buf->pool = newPool;
      pool = newPool;
   }
   if (newPool == NULL)
      newPool = pool;

   if (!pool->create) {
      _mesa_error(NULL, GL_INVALID_OPERATION,
                  "driBOData called on invalid buffer\n");
      BM_CKFATAL(-EINVAL);
   }

   newBuffer = (!buf->private || pool != newPool ||
                pool->size(pool, buf->private) < size);

   if (!flags)
      flags = buf->flags;

   if (newBuffer) {
      /* Replace the backing storage entirely. */
      if (buf->createdByReference) {
         _mesa_error(NULL, GL_INVALID_OPERATION,
                     "driBOData requiring resizing called on "
                     "shared buffer.\n");
         BM_CKFATAL(-EINVAL);
      }

      if (buf->private)
         buf->pool->destroy(buf->pool, buf->private);

      pool = newPool;
      buf->pool = newPool;
      buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
                                  buf->alignment);
      if (!buf->private)
         retval = -ENOMEM;

      if (retval == 0)
         retval = pool->map(pool, buf->private,
                            DRM_BO_FLAG_WRITE,
                            DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
   } else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
                        DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
      /*
       * Buffer is busy. need to create a new one.
       */
      /* Best effort: if the replacement create fails we fall back to a
       * blocking map of the old (busy) buffer below. */
      void *newBuf;

      newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
                            buf->alignment);
      if (newBuf) {
         buf->pool->destroy(buf->pool, buf->private);
         buf->private = newBuf;
      }

      retval = pool->map(pool, buf->private,
                         DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
   } else {
      uint64_t flag_diff = flags ^ buf->flags;

      /*
       * We might need to change buffer flags.
       */
      if (flag_diff){
         assert(pool->setStatus != NULL);
         BM_CKFATAL(pool->unmap(pool, buf->private));
         BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
                                    buf->flags));
         if (!data)
            goto out;   /* nothing to copy; buffer already unmapped */

         retval = pool->map(pool, buf->private,
                            DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
      }
   }

   if (retval == 0) {
      if (data)
         memcpy(virtual, data, size);

      BM_CKFATAL(pool->unmap(pool, buf->private));
   }

 out:
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return retval;
}
/*
 * Copy `size` bytes from `data` into the buffer starting at byte
 * `offset`.  No-op when size is zero or data is NULL.  Must not be
 * called on user buffers.
 */
void
driBOSubData(struct _DriBufferObject *buf,
             unsigned long offset, unsigned long size, const void *data)
{
   void *mapped;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   _glthread_LOCK_MUTEX(buf->mutex);
   if (size != 0 && data != NULL) {
      BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
                                DRM_BO_FLAG_WRITE, 0, &buf->mutex,
                                &mapped));
      memcpy((unsigned char *) mapped + offset, data, size);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   }
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Copy `size` bytes out of the buffer, starting at byte `offset`, into
 * `data`.  No-op when size is zero or data is NULL.  Must not be called
 * on user buffers.
 */
void
driBOGetSubData(struct _DriBufferObject *buf,
                unsigned long offset, unsigned long size, void *data)
{
   void *mapped;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   _glthread_LOCK_MUTEX(buf->mutex);
   if (size != 0 && data != NULL) {
      BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
                                DRM_BO_FLAG_READ, 0, &buf->mutex, &mapped));
      memcpy(data, (unsigned char *) mapped + offset, size);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   }
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Attach buf to an existing kernel buffer identified by `handle`,
 * through the pool's reference callback.  The buffer must not already
 * have backing storage, and the pool must support referencing.  Flags
 * are inherited from the referenced kernel BO.  All failures are fatal.
 */
void
driBOSetReferenced(struct _DriBufferObject *buf,
                   unsigned long handle)
{
   _glthread_LOCK_MUTEX(buf->mutex);
   if (buf->private != NULL) {
      _mesa_error(NULL, GL_INVALID_OPERATION,
                  "Invalid buffer for setReferenced\n");
      BM_CKFATAL(-EINVAL);
   }
   if (buf->pool->reference == NULL) {
      _mesa_error(NULL, GL_INVALID_OPERATION,
                  "Invalid buffer pool for setReferenced\n");
      BM_CKFATAL(-EINVAL);
   }
   buf->private = buf->pool->reference(buf->pool, handle);
   if (!buf->private) {
      _mesa_error(NULL, GL_OUT_OF_MEMORY,
                  "Invalid buffer pool for setStatic\n");
      BM_CKFATAL(-ENOMEM);
   }
   buf->createdByReference = GL_TRUE;
   /* Mirror the kernel BO's flags into the wrapper. */
   buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Create n fresh buffer objects into buffers[].
 * flags == 0 selects a default set of memory-placement and access flags.
 * Returns 0 on success or -ENOMEM.
 * NOTE(review): buffers created before a failed allocation are not
 * released on the error path; callers appear to treat failure as fatal
 * (see driGenUserBuffer) -- confirm before relying on partial results.
 */
int
driGenBuffers(struct _DriBufferPool *pool,
              const char *name,
              unsigned n,
              struct _DriBufferObject *buffers[],
              unsigned alignment, uint64_t flags, unsigned hint)
{
   struct _DriBufferObject *buf;
   unsigned i;   /* unsigned: the original's `int i` was compared with unsigned n */

   assert(pool);   /* validate before any use of pool */

   flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
      DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;

   ++num_buffers;   /* per-call statistic; driGenUserBuffer compensates */

   for (i = 0; i < n; ++i) {
      buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
      if (!buf)
         return -ENOMEM;

      _glthread_INIT_MUTEX(buf->mutex);
      _glthread_LOCK_MUTEX(buf->mutex);
      buf->refCount = 1;
      buf->flags = flags;
      buf->hint = hint;
      buf->name = name;
      buf->alignment = alignment;
      buf->pool = pool;
      buf->createdByReference = 0;
      _glthread_UNLOCK_MUTEX(buf->mutex);
      buffers[i] = buf;
   }
   return 0;
}
/*
 * Wrap caller-owned memory (ptr, bytes) in a single buffer object
 * returned through *buffers.  Storage is never allocated from the pool;
 * the object merely records the user pointer.
 */
void
driGenUserBuffer(struct _DriBufferPool *pool,
                 const char *name,
                 struct _DriBufferObject **buffers,
                 void *ptr, unsigned bytes)
{
   const unsigned alignment = 1, flags = 0, hint = 0;

   --num_buffers; /* JB: is inced in GenBuffes */
   /* The original ignored this return value and dereferenced an
    * unset *buffers on -ENOMEM; treat allocation failure as fatal,
    * consistent with the rest of this file. */
   BM_CKFATAL(driGenBuffers(pool, name, 1, buffers, alignment, flags, hint));
   ++num_user_buffers;

   (*buffers)->userBuffer = 1;
   (*buffers)->userData = ptr;
   (*buffers)->userSize = bytes;
}
/*
 * Drop one reference on each of the n buffer objects; each is destroyed
 * if its reference count reaches zero.
 */
void
driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
{
   unsigned i;   /* unsigned: the original's `int i` was compared with unsigned n */

   for (i = 0; i < n; ++i) {
      driBOUnReference(buffers[i]);
   }
}
/*
 * Buffer-manager initialization hook.  Intentionally empty: this
 * manager needs no per-fd setup, but the entry point is kept for the
 * callers' sake.  fd is unused.
 */
void
driInitBufMgr(int fd)
{
   ;
}
/*
* Note that lists are per-context and don't need mutex protection.
*/
/*
 * Allocate a DriBufferList whose two sub-lists are pre-sized for
 * `target` entries.  All failures are fatal (consistent with the
 * BM_CKFATAL convention used throughout this file).
 */
struct _DriBufferList *
driBOCreateList(int target)
{
   struct _DriBufferList *list = calloc(1, sizeof(*list));

   /* The original never checked this allocation and would have
    * dereferenced NULL just below. */
   if (!list)
      BM_CKFATAL(-ENOMEM);

   BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
   BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
   return list;
}
/*
 * Reset both sub-lists of a DriBufferList for reuse.  Returns the first
 * error encountered, or 0.
 */
int
driBOResetList(struct _DriBufferList * list)
{
   int err = drmBOResetList(&list->drmBuffers);

   if (err)
      return err;

   return drmBOResetList(&list->driBuffers);
}
/*
 * Tear down both sub-lists and release the DriBufferList container
 * itself.
 */
void
driBOFreeList(struct _DriBufferList * list)
{
   drmBOFreeList(&list->drmBuffers);
   drmBOFreeList(&list->driBuffers);
   free(list);
}
/*
* Copied from libdrm, because it is needed by driAddValidateItem.
*/
/*
 * Append a node for `item` to the tail of the validation list, reusing
 * a node from the free ring when one is available.  Returns the node,
 * or NULL on allocation failure.
 */
static drmBONode *
driAddListItem(drmBOList * list, drmBO * item,
               uint64_t arg0, uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *entry = list->free.next;

   if (entry != &list->free) {
      /* Recycle a pre-allocated node. */
      DRMLISTDEL(entry);
      node = DRMLISTENTRY(drmBONode, entry, head);
   } else {
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node)
         return NULL;
      list->numCurrent++;
   }

   memset(&node->bo_arg, 0, sizeof(node->bo_arg));
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADDTAIL(&node->head, &list->list);
   list->numOnList++;
   return node;
}
/*
* Slightly modified version compared to the libdrm version.
* This one returns the list index of the buffer put on the list.
*/
/*
 * Add `buf` to the validation list with the requested flags/mask, or
 * merge the request into an existing entry for the same buffer.
 * *itemLoc receives the buffer's position in the list, *pnode the node.
 * Returns 0, -ENOMEM, or -EINVAL when the new request conflicts with
 * flags already recorded for the buffer.
 */
static int
driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
                   uint64_t mask, int *itemLoc,
                   struct _drmBONode **pnode)
{
   drmBONode *node, *cur;
   drmMMListHead *l;
   int count = 0;

   /* Linear scan for an existing entry; count doubles as the index. */
   cur = NULL;
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      if (node->buf == buf) {
         cur = node;
         break;
      }
      count++;
   }
   if (!cur) {
      cur = driAddListItem(list, buf, flags, mask);
      if (!cur)
         return -ENOMEM;

      cur->arg0 = flags;
      cur->arg1 = mask;
   } else {
      /* Merge: memory-placement bits are intersected, access bits are
       * unioned; both restricted to the bits the new mask selects. */
      uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
      uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;

      /* Reject a request whose masked non-memory bits contradict what
       * was recorded earlier. */
      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }

      cur->arg1 |= mask;
      cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);

      /* A masked memory field with no surviving placement bit means the
       * two requests' placements were disjoint. */
      if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
          (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
         return -EINVAL;
      }
   }
   *itemLoc = count;
   *pnode = cur;
   return 0;
}
/*
 * Queue buf on both validation sub-lists: its kernel drmBO on
 * drmBuffers (via the local driAddValidateItem, which also reports the
 * list index and node) and the wrapper itself on driBuffers.  A buffer
 * newly added to driBuffers gains a reference, dropped later by
 * driBOUnrefUserList()/driBOFenceUserList().  Failures are fatal.
 */
void
driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
                 uint64_t flags, uint64_t mask, int *itemLoc,
                 struct _drmBONode **node)
{
   int newItem;

   _glthread_LOCK_MUTEX(buf->mutex);
   BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
                                 buf->pool->kernel(buf->pool, buf->private),
                                 flags, mask, itemLoc, node));
   /* The DriBufferObject pointer is smuggled through the drmBO slot of
    * the second list; it is cast back in the iteration helpers. */
   BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
                                 flags, mask, &newItem));
   if (newItem)
      buf->refCount++;

   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Expose the kernel-BO sub-list for submission.  Takes the kernel-BO
 * write lock, which the caller must release with driPutdrmBOList().
 */
drmBOList *driGetdrmBOList(struct _DriBufferList *list)
{
   driWriteLockKernelBO();
   return &list->drmBuffers;
}
/* Release the write lock taken by driGetdrmBOList().  list is unused. */
void driPutdrmBOList(struct _DriBufferList *list)
{
   driWriteUnlockKernelBO();
}
/*
 * Attach a fence to the buffer through its pool's fence callback, if
 * the pool provides one.  Pools without a callback rely on kernel-side
 * fencing.
 */
void
driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
{
   _glthread_LOCK_MUTEX(buf->mutex);
   if (buf->pool->fence)
      BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
   _glthread_UNLOCK_MUTEX(buf->mutex);
}
/*
 * Walk the user-space sub-list and drop the reference each buffer
 * gained when it was added via driBOAddListItem().
 */
void
driBOUnrefUserList(struct _DriBufferList *list)
{
   void *iter;

   for (iter = drmBOListIterator(&list->driBuffers); iter;
        iter = drmBOListNext(&list->driBuffers, iter)) {
      driBOUnReference((struct _DriBufferObject *) drmBOListBuf(iter));
   }
}
/*
 * Create a DriFenceObject wrapping the kernel fence kFence, attach it
 * to every buffer on the user-space sub-list, drop each buffer's
 * list reference, and reset the list for reuse.  Returns the new fence
 * (caller owns the returned reference).  `name` is currently unused.
 */
struct _DriFenceObject *
driBOFenceUserList(struct _DriFenceMgr *mgr,
                   struct _DriBufferList *list, const char *name,
                   drmFence *kFence)
{
   struct _DriFenceObject *fence;
   struct _DriBufferObject *buf;
   void *curBuf;

   fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
                          kFence, sizeof(*kFence));
   curBuf = drmBOListIterator(&list->driBuffers);

   /*
    * User-space fencing callbacks.
    */
   while (curBuf) {
      buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
      driBOFence(buf, fence);
      /* Drop the reference taken when the buffer joined the list. */
      driBOUnReference(buf);
      curBuf = drmBOListNext(&list->driBuffers, curBuf);
   }

   driBOResetList(list);
   return fence;
}
/*
 * Run the user-space validation callback (when the pool has one) for
 * every buffer on the user-space sub-list.
 */
void
driBOValidateUserList(struct _DriBufferList * list)
{
   struct _DriBufferObject *bo;
   void *iter;

   /*
    * User-space validation callbacks.
    */
   for (iter = drmBOListIterator(&list->driBuffers); iter;
        iter = drmBOListNext(&list->driBuffers, iter)) {
      bo = (struct _DriBufferObject *) drmBOListBuf(iter);
      _glthread_LOCK_MUTEX(bo->mutex);
      if (bo->pool->validate)
         BM_CKFATAL(bo->pool->validate(bo->pool, bo->private, &bo->mutex));
      _glthread_UNLOCK_MUTEX(bo->mutex);
   }
}
/* Destroy a buffer pool by delegating to its takeDown callback. */
void
driPoolTakeDown(struct _DriBufferPool *pool)
{
   pool->takeDown(pool);
}
/*
 * Return the buffer's size in bytes as reported by its pool, under the
 * object mutex.
 */
unsigned long
driBOSize(struct _DriBufferObject *buf)
{
   unsigned long nBytes;

   _glthread_LOCK_MUTEX(buf->mutex);
   nBytes = buf->pool->size(buf->pool, buf->private);
   _glthread_UNLOCK_MUTEX(buf->mutex);

   return nBytes;
}
/* Debug accessor: the kernel-BO sub-list, without any locking. */
drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
{
   return &list->drmBuffers;
}
/* Debug accessor: the user-space sub-list, without any locking. */
drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
{
   return &list->driBuffers;
}
@@ -0,0 +1,138 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#ifndef _PSB_BUFMGR_H_
#define _PSB_BUFMGR_H_
#include <xf86mm.h>
#include "i915_drm.h"
#include "ws_dri_fencemgr.h"
/* One entry of a drmBOList: links a drmBO into the list together with
 * the i915 execution argument and the flags/mask pair (arg0/arg1) being
 * requested for validation. */
typedef struct _drmBONode
{
   drmMMListHead head;            /* list linkage */
   drmBO *buf;                    /* the buffer this node refers to */
   struct drm_i915_op_arg bo_arg; /* per-buffer kernel submission arg */
   uint64_t arg0;                 /* requested flags */
   uint64_t arg1;                 /* mask of flag bits being requested */
} drmBONode;
/* A validation list: an in-use ring plus a ring of pre-allocated spare
 * nodes, with bookkeeping counters. */
typedef struct _drmBOList {
   unsigned numTarget;   /* desired total node count */
   unsigned numCurrent;  /* nodes currently allocated (in-use + free) */
   unsigned numOnList;   /* nodes on the in-use ring */
   drmMMListHead list;   /* in-use ring */
   drmMMListHead free;   /* spare-node ring */
} drmBOList;
struct _DriFenceObject;
struct _DriBufferObject;
struct _DriBufferPool;
struct _DriBufferList;

/*
 * Return a pointer to the libdrm buffer object this DriBufferObject
 * uses.
 */

extern drmBO *driBOKernel(struct _DriBufferObject *buf);
extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
                      unsigned hint);
extern void driBOUnmap(struct _DriBufferObject *buf);
extern unsigned long driBOOffset(struct _DriBufferObject *buf);
extern unsigned long driBOPoolOffset(struct _DriBufferObject *buf);
extern uint64_t driBOFlags(struct _DriBufferObject *buf);
extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
extern void driBOUnReference(struct _DriBufferObject *buf);
extern int driBOData(struct _DriBufferObject *r_buf,
                     unsigned size, const void *data,
                     struct _DriBufferPool *pool, uint64_t flags);
extern void driBOSubData(struct _DriBufferObject *buf,
                         unsigned long offset, unsigned long size,
                         const void *data);
extern void driBOGetSubData(struct _DriBufferObject *buf,
                            unsigned long offset, unsigned long size,
                            void *data);
extern int driGenBuffers(struct _DriBufferPool *pool,
                         const char *name,
                         unsigned n,
                         struct _DriBufferObject *buffers[],
                         unsigned alignment, uint64_t flags, unsigned hint);
extern void driGenUserBuffer(struct _DriBufferPool *pool,
                             const char *name,
                             struct _DriBufferObject *buffers[],
                             void *ptr, unsigned bytes);
extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
extern void driInitBufMgr(int fd);
extern struct _DriBufferList *driBOCreateList(int target);
extern int driBOResetList(struct _DriBufferList * list);
extern void driBOAddListItem(struct _DriBufferList * list,
                             struct _DriBufferObject *buf,
                             uint64_t flags, uint64_t mask, int *itemLoc,
                             struct _drmBONode **node);
extern void driBOValidateList(int fd, struct _DriBufferList * list);
extern void driBOFreeList(struct _DriBufferList * list);
extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
                                                  struct _DriBufferList *list,
                                                  const char *name,
                                                  drmFence *kFence);
extern void driBOUnrefUserList(struct _DriBufferList *list);
extern void driBOValidateUserList(struct _DriBufferList * list);
extern drmBOList *driGetdrmBOList(struct _DriBufferList *list);
extern void driPutdrmBOList(struct _DriBufferList *list);
extern void driBOFence(struct _DriBufferObject *buf,
                       struct _DriFenceObject *fence);
/* NOTE(review): driPoolTakeDown was declared twice in the original;
 * the duplicate has been dropped. */
extern void driPoolTakeDown(struct _DriBufferPool *pool);
extern void driBOSetReferenced(struct _DriBufferObject *buf,
                               unsigned long handle);
extern unsigned long driBOSize(struct _DriBufferObject *buf);
extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
extern void driReadLockKernelBO(void);
extern void driReadUnlockKernelBO(void);
extern void driWriteLockKernelBO(void);
extern void driWriteUnlockKernelBO(void);

/*
 * For debugging purposes.
 */

extern drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list);
extern drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list);

#endif
@@ -0,0 +1,102 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _PSB_BUFPOOL_H_
#define _PSB_BUFPOOL_H_
#include <xf86drm.h>
#include <glthread.h>
struct _DriFenceObject;
/* Virtual interface of a buffer pool.  `private` in every callback is
 * the pool-private handle returned by create()/reference().  Optional
 * callbacks (reference, validate, setStatus, fence) may be NULL. */
typedef struct _DriBufferPool
{
   int fd;                       /* DRM file descriptor */
   /* Map the buffer for CPU access; result goes in *virtual. */
   int (*map) (struct _DriBufferPool * pool, void *private,
               unsigned flags, int hint, _glthread_Mutex *mutex,
               void **virtual);
   int (*unmap) (struct _DriBufferPool * pool, void *private);
   /* Destroy a buffer this pool created. */
   int (*destroy) (struct _DriBufferPool * pool, void *private);
   unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
   unsigned long (*poolOffset) (struct _DriBufferPool * pool, void *private);
   uint64_t (*flags) (struct _DriBufferPool * pool, void *private);
   unsigned long (*size) (struct _DriBufferPool * pool, void *private);
   /* Allocate backing storage; returns the pool-private handle. */
   void *(*create) (struct _DriBufferPool * pool, unsigned long size,
                    uint64_t flags, unsigned hint, unsigned alignment);
   /* Wrap an existing kernel buffer by handle (optional). */
   void *(*reference) (struct _DriBufferPool * pool, unsigned handle);
   /* Release a buffer obtained via reference(). */
   int (*unreference) (struct _DriBufferPool * pool, void *private);
   /* Attach a fence (optional; NULL when the kernel handles fencing). */
   int (*fence) (struct _DriBufferPool * pool, void *private,
                 struct _DriFenceObject * fence);
   /* Return the backing drmBO, or NULL if there is none. */
   drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
   int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
   int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
                    int lazy);
   /* Change buffer flags in place (optional). */
   int (*setStatus) (struct _DriBufferPool *pool, void *private,
                     uint64_t flag_diff, uint64_t old_flags);
   void (*takeDown) (struct _DriBufferPool * pool);
   void *data;                   /* pool-implementation private data */
} DriBufferPool;
extern void bmError(int val, const char *file, const char *function,
                    int line);

/*
 * Abort via bmError() if `val` evaluates nonzero (a negative errno).
 * `val` is evaluated exactly once.  The original definition ended in
 * "while(0);" -- the trailing semicolon defeats the do/while(0) idiom
 * and breaks uses such as "if (x) BM_CKFATAL(e); else ...".
 */
#define BM_CKFATAL(val)					       \
  do {							       \
    int tstVal = (val);					       \
    if (tstVal)						       \
      bmError(tstVal, __FILE__, __FUNCTION__, __LINE__);       \
  } while(0)
/*
 * Builtin pools.
 */

/*
 * Kernel buffer objects. Size in multiples of page size. Page size aligned.
 */

extern struct _DriBufferPool *driDRMPoolInit(int fd);
/* Pool backed by plain malloc'd system memory. */
extern struct _DriBufferPool *driMallocPoolInit(void);

struct _DriFreeSlabManager;
/* Slab sub-allocator pool; parameters control the size classes and how
 * aggressively slabs are cached (presumably in the fMan free-slab
 * manager -- TODO confirm against the implementation). */
extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
                                               uint64_t validMask,
                                               uint32_t smallestSize,
                                               uint32_t numSizes,
                                               uint32_t desiredNumBuffers,
                                               uint32_t maxSlabSize,
                                               uint32_t pageAlignment,
                                               struct _DriFreeSlabManager *fMan);
extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
extern struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);

#endif
@@ -0,0 +1,268 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <unistd.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "assert.h"
/*
* Buffer pool implementation using DRM buffer objects as DRI buffer objects.
*/
/*
 * Allocate a kernel buffer object of `size` bytes.  Alignments above a
 * page must be a whole number of pages (drmBOCreate takes page units).
 * Returns the drmBO as the pool-private handle, or NULL on failure.
 */
static void *
pool_create(struct _DriBufferPool *pool,
            unsigned long size, uint64_t flags, unsigned hint,
            unsigned alignment)
{
   unsigned pageSize = getpagesize();
   drmBO *bo;
   int err;

   bo = (drmBO *) malloc(sizeof(*bo));
   if (!bo)
      return NULL;

   if ((alignment > pageSize) && (alignment % pageSize)) {
      free(bo);
      return NULL;
   }

   err = drmBOCreate(pool->fd, size, alignment / pageSize,
                     NULL, flags, hint, bo);
   if (err) {
      free(bo);
      return NULL;
   }

   return (void *) bo;
}
/*
 * Wrap an existing kernel buffer, identified by `handle`, as a
 * pool-private drmBO.  Returns NULL on failure.
 */
static void *
pool_reference(struct _DriBufferPool *pool, unsigned handle)
{
   drmBO *bo;
   int err;

   bo = (drmBO *) malloc(sizeof(*bo));
   if (!bo)
      return NULL;

   err = drmBOReference(pool->fd, handle, bo);
   if (err) {
      free(bo);
      return NULL;
   }

   return (void *) bo;
}
/*
 * Release a buffer this pool created: drop the kernel reference and
 * free the wrapper.  (Identical to pool_unreference -- the kernel
 * refcounts both cases the same way.)
 */
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   int ret;
   drmBO *buf = (drmBO *) private;
   driReadLockKernelBO();
   ret = drmBOUnreference(pool->fd, buf);
   free(buf);
   driReadUnlockKernelBO();
   return ret;
}
/*
 * Release a buffer obtained through pool_reference(): drop the kernel
 * reference and free the wrapper.
 */
static int
pool_unreference(struct _DriBufferPool *pool, void *private)
{
   int ret;
   drmBO *buf = (drmBO *) private;
   driReadLockKernelBO();
   ret = drmBOUnreference(pool->fd, buf);
   free(buf);
   driReadUnlockKernelBO();
   return ret;
}
/*
 * Map the kernel BO; the address is returned through *virtual.
 * The mutex parameter is unused by this pool.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, _glthread_Mutex *mutex, void **virtual)
{
   int err;

   driReadLockKernelBO();
   err = drmBOMap(pool->fd, (drmBO *) private, flags, hint, virtual);
   driReadUnlockKernelBO();

   return err;
}
/* Undo a pool_map() on the underlying kernel BO. */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   int err;

   driReadLockKernelBO();
   err = drmBOUnmap(pool->fd, (drmBO *) private);
   driReadUnlockKernelBO();

   return err;
}
/*
 * Return the buffer's GPU offset.  Only meaningful for pinned
 * (DRM_BO_FLAG_NO_MOVE) buffers, hence the assert.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   drmBO *buf = (drmBO *) private;
   unsigned long offset;

   driReadLockKernelBO();
   assert(buf->flags & DRM_BO_FLAG_NO_MOVE);
   offset = buf->offset;
   driReadUnlockKernelBO();

   /* Return the value snapshotted under the read lock; the original
    * re-read buf->offset after unlocking and left `offset` unused. */
   return offset;
}
/* Kernel BOs are not sub-allocated, so the in-pool offset is always 0. */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   return 0;
}
/* Return the kernel BO's current flags, read under the kernel-BO lock. */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   uint64_t currentFlags;

   driReadLockKernelBO();
   currentFlags = ((drmBO *) private)->flags;
   driReadUnlockKernelBO();

   return currentFlags;
}
/* Return the kernel BO's size in bytes, read under the kernel-BO lock. */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   drmBO *buf = (drmBO *) private;
   unsigned long size;

   driReadLockKernelBO();
   size = buf->size;
   driReadUnlockKernelBO();

   /* Return the value snapshotted under the read lock; the original
    * re-read buf->size after unlocking and left `size` unused. */
   return size;
}
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   /*
    * Noop. The kernel handles all fencing.
    */
   return 0;
}
/* The pool-private handle IS the drmBO in this pool; just cast it. */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   return (drmBO *) private;
}
/*
 * Wait for the kernel BO to become idle; lazy selects the
 * DRM_BO_HINT_WAIT_LAZY wait.  The mutex parameter is unused here.
 */
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
              int lazy)
{
   unsigned waitHint = lazy ? DRM_BO_HINT_WAIT_LAZY : 0;
   int err;

   driReadLockKernelBO();
   err = drmBOWaitIdle(pool->fd, (drmBO *) private, waitHint);
   driReadUnlockKernelBO();

   return err;
}
/* Free the pool object itself; individual buffers are released via
 * pool_destroy()/pool_unreference(). */
static void
pool_takedown(struct _DriBufferPool *pool)
{
   free(pool);
}
/*static int
pool_setStatus(struct _DriBufferPool *pool, void *private,
uint64_t flag_diff, uint64_t old_flags)
{
drmBO *buf = (drmBO *) private;
uint64_t new_flags = old_flags ^ flag_diff;
int ret;
driReadLockKernelBO();
ret = drmBOSetStatus(pool->fd, buf, new_flags, flag_diff,
0, 0, 0);
driReadUnlockKernelBO();
return ret;
}*/
/*
 * Create a buffer pool whose buffers are kernel DRM buffer objects on
 * file descriptor fd.  Returns NULL on allocation failure.
 */
struct _DriBufferPool *
driDRMPoolInit(int fd)
{
   struct _DriBufferPool *pool;

   pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
   if (!pool)
      return NULL;

   pool->fd = fd;
   pool->map = &pool_map;
   pool->unmap = &pool_unmap;
   pool->destroy = &pool_destroy;
   pool->offset = &pool_offset;
   pool->poolOffset = &pool_poolOffset;
   pool->flags = &pool_flags;
   pool->size = &pool_size;
   pool->create = &pool_create;
   pool->fence = &pool_fence;
   pool->kernel = &pool_kernel;
   pool->validate = NULL;
   pool->waitIdle = &pool_waitIdle;
   /* The original left setStatus uninitialized in the malloc'd struct;
    * driBOData reads pool->setStatus, so it must be a valid pointer or
    * NULL. */
   pool->setStatus = NULL;
   pool->takeDown = &pool_takedown;
   pool->reference = &pool_reference;
   pool->unreference = &pool_unreference;
   pool->data = NULL;
   return pool;
}
@@ -0,0 +1,377 @@
#include "ws_dri_fencemgr.h"
#include "glthread.h"
#include <xf86mm.h>
#include <string.h>
#include <unistd.h>
/*
* Note: Locking order is
* _DriFenceObject::mutex
* _DriFenceMgr::mutex
*/
/* A fence manager: owns one list of outstanding fences per fence class
 * and dispatches to driver callbacks supplied at creation. */
struct _DriFenceMgr {
   /*
    * Constant members. Need no mutex protection.
    */
   struct _DriFenceMgrCreateInfo info;  /* driver callbacks + class count */
   void *private;                       /* driver-private data */

   /*
    * These members are protected by this->mutex
    */
   _glthread_Mutex mutex;
   int refCount;            /* manager lives while fences reference it */
   drmMMListHead *heads;    /* one fence list per fence class */
   int num_fences;          /* fences currently tracked */
};
/* A reference-counted fence tracked by a _DriFenceMgr. */
struct _DriFenceObject {
   /*
    * These members are constant and need no mutex protection.
    */
   struct _DriFenceMgr *mgr;  /* owning manager */
   uint32_t fence_class;      /* index into mgr->heads */
   uint32_t fence_type;       /* full set of type bits this fence signals */

   /*
    * These members are protected by mgr->mutex.
    */
   drmMMListHead head;        /* linkage on the per-class list */
   int refCount;

   /*
    * These members are protected by this->mutex.
    */
   _glthread_Mutex mutex;
   uint32_t signaled_type;    /* type bits known signaled so far */
   void *private;             /* driver fence handle */
};
/* Return the fence's full type-bit mask (constant; no locking needed). */
uint32_t
driFenceType(struct _DriFenceObject *fence)
{
   return fence->fence_type;
}
/*
 * Create a fence manager from the given creation info (callbacks and
 * number of fence classes).  Returns NULL on allocation failure.
 */
struct _DriFenceMgr *
driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
{
   struct _DriFenceMgr *tmp;
   uint32_t i;

   tmp = calloc(1, sizeof(*tmp));
   if (!tmp)
      return NULL;

   _glthread_INIT_MUTEX(tmp->mutex);
   _glthread_LOCK_MUTEX(tmp->mutex);
   tmp->refCount = 1;
   tmp->info = *info;
   tmp->num_fences = 0;
   tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
   if (!tmp->heads)
      goto out_err;

   /* One empty fence list per class. */
   for (i=0; i<tmp->info.num_classes; ++i) {
      DRMINITLISTHEAD(&tmp->heads[i]);
   }
   _glthread_UNLOCK_MUTEX(tmp->mutex);
   return tmp;

 out_err:
   /* Unlock before freeing -- the original freed tmp with its mutex
    * still locked -- and drop the redundant `if (tmp)`: tmp is always
    * non-NULL here. */
   _glthread_UNLOCK_MUTEX(tmp->mutex);
   free(tmp);
   return NULL;
}
/*
 * Drop one manager reference; called with mgr->mutex held.  Clears the
 * caller's pointer.  On the last reference the manager is freed (the
 * locked mutex goes away with it); otherwise the mutex is released.
 */
static void
driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
{
   struct _DriFenceMgr *mgr = *pMgr;

   *pMgr = NULL;
   if (--mgr->refCount == 0)
      free(mgr);
   else
      _glthread_UNLOCK_MUTEX(mgr->mutex);
}
/*
 * Public wrapper: take the manager mutex, then drop one reference
 * (which also clears *pMgr and releases or destroys the mutex).
 */
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
{
   _glthread_LOCK_MUTEX((*pMgr)->mutex);
   driFenceMgrUnrefUnlock(pMgr);
}
/*
 * Drop one fence reference; called with mgr->mutex held.  Clears the
 * caller's pointer.  On the last reference the fence is unlinked from
 * its class list, its driver handle is released, and the fence's hold
 * on the manager refcount is dropped (without triggering manager
 * destruction -- the caller still holds its own manager reference).
 */
static void
driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
{
   struct _DriFenceObject *fence = *pFence;
   struct _DriFenceMgr *mgr = fence->mgr;

   *pFence = NULL;
   if (--fence->refCount == 0) {
      DRMLISTDELINIT(&fence->head);
      if (fence->private)
         mgr->info.unreference(mgr, &fence->private);
      --mgr->num_fences;
      fence->mgr = NULL;
      --mgr->refCount;
      free(fence);
   }
}
/*
 * Starting at `list` (a node on the per-class fence list), walk toward
 * the list head marking each fence's matching type bits signaled; a
 * fence whose bits are all signaled is unlinked and its driver handle
 * released.  Called and returns with mgr->mutex held.
 *
 * Lock order is fence->mutex before mgr->mutex, so for each entry the
 * manager mutex is dropped and re-taken; the refcount bump keeps the
 * entry alive across that window, and the prev-pointer check detects
 * concurrent removal.
 */
static void
driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
                              drmMMListHead *list,
                              uint32_t fence_class,
                              uint32_t fence_type)
{
   struct _DriFenceObject *entry;
   drmMMListHead *prev;

   while(list != &mgr->heads[fence_class]) {
      entry = DRMLISTENTRY(struct _DriFenceObject, list, head);

      /*
       * Up refcount so that entry doesn't disappear from under us
       * when we unlock-relock mgr to get the correct locking order.
       */
      ++entry->refCount;
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      _glthread_LOCK_MUTEX(entry->mutex);
      _glthread_LOCK_MUTEX(mgr->mutex);

      /* Capture the next node toward the head before possibly
       * unlinking this one. */
      prev = list->prev;

      if (list->prev == list) {
         /*
          * Somebody else removed the entry from the list.
          */
         _glthread_UNLOCK_MUTEX(entry->mutex);
         driFenceUnReferenceLocked(&entry);
         return;
      }

      entry->signaled_type |= (fence_type & entry->fence_type);
      if (entry->signaled_type == entry->fence_type) {
         DRMLISTDELINIT(list);
         mgr->info.unreference(mgr, &entry->private);
      }
      _glthread_UNLOCK_MUTEX(entry->mutex);
      driFenceUnReferenceLocked(&entry);   /* drop our temporary ref */
      list = prev;
   }
}
/*
 * Wait until the given type bits of the fence have signaled.
 * Returns 0 on success or the driver finish() error.  On success, all
 * fences queued before this one in its class are marked signaled too.
 */
int
driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
               int lazy_hint)
{
   struct _DriFenceMgr *mgr = fence->mgr;
   int ret = 0;

   _glthread_LOCK_MUTEX(fence->mutex);

   /* Fast path: already known signaled. */
   if ((fence->signaled_type & fence_type) == fence_type)
      goto out0;

   ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
   if (ret)
      goto out0;

   /* Take mgr->mutex before dropping fence->mutex (lock order is
    * fence before mgr), then propagate the signal backwards. */
   _glthread_LOCK_MUTEX(mgr->mutex);
   _glthread_UNLOCK_MUTEX(fence->mutex);

   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 fence_type);
   _glthread_UNLOCK_MUTEX(mgr->mutex);
   return 0;

 out0:
   _glthread_UNLOCK_MUTEX(fence->mutex);
   return ret;
}
/*
 * Return the fence's cached signaled-type mask under its mutex.
 * Only state already recorded on the object is consulted, so the result
 * may be a false negative (the hardware may have signaled more types).
 */
uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
{
   uint32_t cached;

   _glthread_LOCK_MUTEX(fence->mutex);
   cached = fence->signaled_type;
   _glthread_UNLOCK_MUTEX(fence->mutex);

   return cached;
}
/*
 * Query which of "flush_type" the fence has signaled, writing the full
 * signaled mask to *signaled. Consults the backend when the cached state
 * is insufficient, and propagates newly discovered signals to older
 * fences of the same class. Returns 0 on success or the backend error,
 * in which case *signaled falls back to the cached value.
 */
int
driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
uint32_t *signaled)
{
int ret = 0;
struct _DriFenceMgr *mgr;
_glthread_LOCK_MUTEX(fence->mutex);
mgr = fence->mgr;
*signaled = fence->signaled_type;
/* Fast path: cached state already covers the request. */
if ((fence->signaled_type & flush_type) == flush_type)
goto out0;
ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
if (ret) {
*signaled = fence->signaled_type;
goto out0;
}
/* Nothing new signaled: no list walk needed. */
if ((fence->signaled_type | *signaled) == fence->signaled_type)
goto out0;
_glthread_LOCK_MUTEX(mgr->mutex);
_glthread_UNLOCK_MUTEX(fence->mutex);
driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
*signaled);
_glthread_UNLOCK_MUTEX(mgr->mutex);
return 0;
out0:
_glthread_UNLOCK_MUTEX(fence->mutex);
return ret;
}
/*
 * Take one reference on a fence. The manager mutex protects the
 * refcount (it is also modified under mgr->mutex elsewhere).
 * Returns the same fence pointer for caller convenience.
 */
struct _DriFenceObject *
driFenceReference(struct _DriFenceObject *fence)
{
_glthread_LOCK_MUTEX(fence->mgr->mutex);
++fence->refCount;
_glthread_UNLOCK_MUTEX(fence->mgr->mutex);
return fence;
}
/*
 * Drop one reference on *pFence (NULL-safe) and clear the pointer.
 * A temporary manager reference is taken so the manager survives the
 * fence teardown; driFenceMgrUnrefUnlock then drops it and releases
 * (or frees with) the manager mutex.
 */
void
driFenceUnReference(struct _DriFenceObject **pFence)
{
struct _DriFenceMgr *mgr;
if (*pFence == NULL)
return;
mgr = (*pFence)->mgr;
_glthread_LOCK_MUTEX(mgr->mutex);
++mgr->refCount;
driFenceUnReferenceLocked(pFence);
driFenceMgrUnrefUnlock(&mgr);
}
/*
 * Create a fence in class "fence_class" with emit mask "fence_type".
 * If private_size is nonzero, the driver-private data at "private" is
 * copied into the same allocation (16-byte aligned tail) and the copy
 * is used for callbacks; otherwise "private" is used directly.
 * Returns NULL on allocation failure.
 */
struct _DriFenceObject
*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
uint32_t fence_type, void *private, size_t private_size)
{
struct _DriFenceObject *fence;
size_t fence_size = sizeof(*fence);
/* Align the trailing private-data copy to 16 bytes. */
if (private_size)
fence_size = ((fence_size + 15) & ~15);
fence = calloc(1, fence_size + private_size);
if (!fence) {
/*
* Out of memory: synchronously finish the hardware fence so the
* caller can proceed without a fence object.
* NOTE(review): the 10-second usleep on finish failure looks like a
* last-resort throttle to let the hardware idle -- confirm intent.
*/
int ret = mgr->info.finish(mgr, private, fence_type, 0);
if (ret)
usleep(10000000);
return NULL;
}
_glthread_INIT_MUTEX(fence->mutex);
_glthread_LOCK_MUTEX(fence->mutex);
_glthread_LOCK_MUTEX(mgr->mutex);
fence->refCount = 1;
/* Newest fences go at the tail of the per-class list (ordered). */
DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
fence->mgr = mgr;
/* The fence holds a manager reference until destroyed. */
++mgr->refCount;
++mgr->num_fences;
_glthread_UNLOCK_MUTEX(mgr->mutex);
fence->fence_class = fence_class;
fence->fence_type = fence_type;
fence->signaled_type = 0;
fence->private = private;
if (private_size) {
fence->private = (void *)(((uint8_t *) fence) + fence_size);
memcpy(fence->private, private, private_size);
}
_glthread_UNLOCK_MUTEX(fence->mutex);
return fence;
}
static int
tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
uint32_t *signaled_type)
{
long fd = (long) mgr->private;
int dummy;
drmFence *fence = (drmFence *) private;
int ret;
*signaled_type = 0;
ret = drmFenceSignaled((int) fd, fence, flush_type, &dummy);
if (ret)
return ret;
*signaled_type = fence->signaled;
return 0;
}
/*
 * TTM backend "finish" callback: wait in the kernel for the drmFence to
 * signal "fence_type". lazy_hint selects a sleeping wait over polling.
 */
static int
tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
	int lazy_hint)
{
   long fd = (long) mgr->private;
   unsigned wait_flags = 0;

   if (lazy_hint)
      wait_flags = DRM_FENCE_FLAG_WAIT_LAZY;

   return drmFenceWait((int) fd, wait_flags, (drmFence *) private, fence_type);
}
static int
tUnref(struct _DriFenceMgr *mgr, void **private)
{
long fd = (long) mgr->private;
drmFence *fence = (drmFence *) *private;
*private = NULL;
return drmFenceUnreference(fd, fence);
}
struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
{
struct _DriFenceMgrCreateInfo info;
struct _DriFenceMgr *mgr;
info.flags = DRI_FENCE_CLASS_ORDERED;
info.num_classes = 4;
info.signaled = tSignaled;
info.finish = tFinish;
info.unreference = tUnref;
mgr = driFenceMgrCreate(&info);
if (mgr == NULL)
return NULL;
mgr->private = (void *) (long) fd;
return mgr;
}
@@ -0,0 +1,115 @@
#ifndef DRI_FENCEMGR_H
#define DRI_FENCEMGR_H
#include <stdint.h>
#include <stdlib.h>
struct _DriFenceObject;
struct _DriFenceMgr;
/*
* Do a quick check to see if the fence manager has registered the fence
* object as signaled. Note that this function may return a false negative
* answer.
*/
extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
/*
* Check if the fence object is signaled. This function can be substantially
* more expensive to call than the above function, but will not return a false
* negative answer. The argument "flush_type" sets the types that the
* underlying mechanism must make sure will eventually signal.
*/
extern int driFenceSignaledType(struct _DriFenceObject *fence,
uint32_t flush_type, uint32_t *signaled);
/*
* Convenience functions.
*/
/*
 * Convenience wrapper: non-false-negative check that all of
 * "flush_type" has signaled. Backend errors are reported as
 * "not signaled".
 */
static inline int driFenceSignaled(struct _DriFenceObject *fence,
				   uint32_t flush_type)
{
   uint32_t types = 0;

   if (driFenceSignaledType(fence, flush_type, &types) != 0)
      return 0;

   return (types & flush_type) == flush_type;
}
/*
 * Convenience wrapper: cheap cached check that all of "flush_type" has
 * signaled. May return a false negative (see driFenceSignaledTypeCached).
 */
static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
					 uint32_t flush_type)
{
   uint32_t cached = driFenceSignaledTypeCached(fence);

   return (cached & flush_type) == flush_type;
}
/*
* Reference a fence object.
*/
extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
/*
* Unreference a fence object. The fence object pointer will be reset to NULL.
*/
extern void driFenceUnReference(struct _DriFenceObject **pFence);
/*
* Wait for a fence to signal the indicated fence_type.
* If "lazy_hint" is true, it indicates that the wait may sleep to avoid
* busy-wait polling.
*/
extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
int lazy_hint);
/*
* Create a DriFenceObject for manager "mgr".
*
* "private" is a pointer that should be used for the callbacks in
* struct _DriFenceMgrCreateInfo.
*
* If private_size is nonzero, then the data stored at *private, of size
* private_size, will be copied, and the fence manager will instead use a
* pointer to the copied data for the callbacks in
* struct _DriFenceMgrCreateInfo. In that case, the object pointed to by
* "private" may be destroyed after the call to driFenceCreate.
*/
extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
uint32_t fence_class,
uint32_t fence_type,
void *private,
size_t private_size);
extern uint32_t driFenceType(struct _DriFenceObject *fence);
/*
* Fence creations are ordered. If a fence signals a given fence_type,
* it is safe to assume that all fences of the same class that were
* created before it have signaled the same type.
*/
#define DRI_FENCE_CLASS_ORDERED (1 << 0)
struct _DriFenceMgrCreateInfo {
uint32_t flags;
uint32_t num_classes;
int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
uint32_t *signaled_type);
int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
int (*unreference) (struct _DriFenceMgr *mgr, void **private);
};
extern struct _DriFenceMgr *
driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
extern struct _DriFenceMgr *
driFenceMgrTTMInit(int fd);
#endif
@@ -0,0 +1,162 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <errno.h>
#include "imports.h"
#include "glthread.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "intel_screen.h"
/*
 * Allocate a malloc-backed buffer: a two-word bookkeeping header (only
 * word 0, the size, is used) followed by the user data. Only
 * DRM_BO_FLAG_MEM_LOCAL placement is supported; anything else is a
 * caller bug and aborts.
 *
 * Fix: the malloc result was dereferenced without a NULL check; now
 * returns NULL on allocation failure.
 */
static void *
pool_create(struct _DriBufferPool *pool,
	    unsigned long size, uint64_t flags, unsigned hint,
	    unsigned alignment)
{
   unsigned long *private;

   if ((flags & DRM_BO_MASK_MEM) != DRM_BO_FLAG_MEM_LOCAL)
      abort();

   private = malloc(size + 2 * sizeof(unsigned long));
   if (!private)
      return NULL;

   *private = size;
   return (void *) private;
}
/*
 * Release a malloc-backed buffer. Always succeeds.
 */
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   (void) pool;

   free(private);
   return 0;
}
/*
 * Malloc-backed buffers are never busy on the GPU, so there is nothing
 * to wait for.
 */
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private,
	      _glthread_Mutex *mutex, int lazy)
{
   (void) pool;
   (void) private;
   (void) mutex;
   (void) lazy;
   return 0;
}
/*
 * "Map" a malloc-backed buffer: the user data starts right after the
 * two-word bookkeeping header.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
	 int hint, _glthread_Mutex *mutex, void **virtual)
{
   unsigned long *header = (unsigned long *) private;

   *virtual = (void *) (header + 2);
   return 0;
}
/*
 * Unmap is a no-op for malloc-backed buffers.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   (void) pool;
   (void) private;
   return 0;
}
/*
 * Malloc-backed buffers have no GPU offset; reaching this callback
 * means the caller validated a buffer that cannot live in GPU space.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   abort();                     /* BUG in the caller */
   return 0UL;
}
/*
 * Malloc-backed buffers have no pool offset; reaching this callback is
 * a caller bug.
 *
 * Fix: the function returns unsigned long but had no return statement
 * after abort(); add one to match the sibling pool_offset and silence
 * missing-return diagnostics.
 */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   abort();                     /* BUG in the caller */
   return 0UL;
}
/*
 * Malloc-backed buffers always live in cached local memory.
 */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   (void) pool;
   (void) private;
   return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
}
/*
 * The buffer size was stashed in the first bookkeeping word at
 * creation time.
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   unsigned long *header = (unsigned long *) private;

   return header[0];
}
/*
 * Malloc-backed buffers cannot be fenced; reaching this callback is a
 * caller bug.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
	   struct _DriFenceObject *fence)
{
   abort();                     /* BUG in the caller */
   return 0UL;
}
/*
 * There is no kernel buffer object behind a malloc-backed buffer;
 * reaching this callback is a caller bug.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   abort();                     /* BUG in the caller */
   return NULL;
}
/*
 * Destroy the pool object itself; individual buffers must already have
 * been destroyed by the caller.
 */
static void
pool_takedown(struct _DriBufferPool *pool)
{
   free(pool);
}
/*
 * Create a buffer pool whose storage comes straight from malloc.
 * Useful for buffers that never need to be visible to the GPU.
 * Returns NULL on allocation failure.
 */
struct _DriBufferPool *
driMallocPoolInit(void)
{
   struct _DriBufferPool *p = malloc(sizeof *p);

   if (p == NULL)
      return NULL;

   p->data = NULL;
   p->fd = -1;

   p->create = pool_create;
   p->destroy = pool_destroy;
   p->map = pool_map;
   p->unmap = pool_unmap;
   p->offset = pool_offset;
   p->poolOffset = pool_poolOffset;
   p->flags = pool_flags;
   p->size = pool_size;
   p->fence = pool_fence;
   p->kernel = pool_kernel;
   p->validate = NULL;          /* nothing to validate for local memory */
   p->waitIdle = pool_waitIdle;
   p->takeDown = pool_takedown;

   return p;
}
@@ -0,0 +1,970 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
*/
#include <stdint.h>
#include <sys/time.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
#include "ws_dri_bufmgr.h"
#include "glthread.h"
#define DRI_SLABPOOL_ALLOC_RETRIES 100
struct _DriSlab;
struct _DriSlabBuffer {
int isSlabBuffer;
drmBO *bo;
struct _DriFenceObject *fence;
struct _DriSlab *parent;
drmMMListHead head;
uint32_t mapCount;
uint32_t start;
uint32_t fenceType;
int unFenced;
_glthread_Cond event;
};
struct _DriKernelBO {
int fd;
drmBO bo;
drmMMListHead timeoutHead;
drmMMListHead head;
struct timeval timeFreed;
uint32_t pageAlignment;
void *virtual;
};
struct _DriSlab{
drmMMListHead head;
drmMMListHead freeBuffers;
uint32_t numBuffers;
uint32_t numFree;
struct _DriSlabBuffer *buffers;
struct _DriSlabSizeHeader *header;
struct _DriKernelBO *kbo;
};
struct _DriSlabSizeHeader {
drmMMListHead slabs;
drmMMListHead freeSlabs;
drmMMListHead delayedBuffers;
uint32_t numDelayed;
struct _DriSlabPool *slabPool;
uint32_t bufSize;
_glthread_Mutex mutex;
};
struct _DriFreeSlabManager {
struct timeval slabTimeout;
struct timeval checkInterval;
struct timeval nextCheck;
drmMMListHead timeoutList;
drmMMListHead unCached;
drmMMListHead cached;
_glthread_Mutex mutex;
};
struct _DriSlabPool {
/*
* The data of this structure remains constant after
* initialization and thus needs no mutex protection.
*/
struct _DriFreeSlabManager *fMan;
uint64_t proposedFlags;
uint64_t validMask;
uint32_t *bucketSizes;
uint32_t numBuckets;
uint32_t pageSize;
int fd;
int pageAlignment;
int maxSlabSize;
int desiredNumBuffers;
struct _DriSlabSizeHeader *headers;
};
/*
* FIXME: Perhaps arrange timeout slabs in size buckets for fast
* retrieval?
*/
/*
 * Return nonzero iff *arg1 is at or after *arg2.
 *
 * Fix: the microsecond comparison used ">", so exactly equal timestamps
 * compared as "before", contradicting the "AfterEq" contract; use ">="
 * so equal times count as expired (timeout boundaries fire on time).
 */
static inline int
driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
{
   return ((arg1->tv_sec > arg2->tv_sec) ||
	   ((arg1->tv_sec == arg2->tv_sec) &&
	    (arg1->tv_usec >= arg2->tv_usec)));
}
/*
 * In-place *arg += *add, normalizing tv_usec into [0, 1000000).
 * Assumes both inputs are already normalized.
 */
static inline void
driTimeAdd(struct timeval *arg, struct timeval *add)
{
   unsigned int carry;

   arg->tv_sec += add->tv_sec;
   arg->tv_usec += add->tv_usec;

   /* Fold overflowed microseconds into whole seconds. */
   carry = arg->tv_usec / 1000000;
   arg->tv_sec += carry;
   arg->tv_usec -= carry * 1000000;
}
/*
 * Drop the kernel reference on a slab's backing buffer object and free
 * the wrapper. NULL-safe.
 */
static void
driFreeKernelBO(struct _DriKernelBO *kbo)
{
   if (kbo == NULL)
      return;

   (void) drmBOUnreference(kbo->fd, &kbo->bo);
   free(kbo);
}
/*
 * Free cached kernel BOs whose timeout has expired. Called with
 * fMan->mutex held. Rate-limited: does nothing until fMan->nextCheck
 * is reached, then scans the timeout list (oldest first) and stops at
 * the first entry that has not yet expired.
 */
static void
driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
struct timeval *time)
{
drmMMListHead *list, *next;
struct _DriKernelBO *kbo;
if (!driTimeAfterEq(time, &fMan->nextCheck))
return;
for (list = fMan->timeoutList.next, next = list->next;
list != &fMan->timeoutList;
list = next, next = list->next) {
kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);
/* List is in insertion (= expiry) order: stop at first live entry. */
if (!driTimeAfterEq(time, &kbo->timeFreed))
break;
/* Unlink from both the timeout list and the cached/unCached list. */
DRMLISTDELINIT(&kbo->timeoutHead);
DRMLISTDELINIT(&kbo->head);
driFreeKernelBO(kbo);
}
/* Schedule the next sweep. */
fMan->nextCheck = *time;
driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
}
/*
* Add a _DriKernelBO to the free slab manager.
* This means that it is available for reuse, but if it's not
* reused in a while, it will be freed.
*/
/*
 * Add a _DriKernelBO to the free slab manager.
 * This means that it is available for reuse, but if it's not
 * reused in a while, it will be freed.
 */
static void
driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
struct _DriKernelBO *kbo)
{
struct timeval time;
_glthread_LOCK_MUTEX(fMan->mutex);
gettimeofday(&time, NULL);
/* Stamp the BO with its expiry time (now + slabTimeout). */
driTimeAdd(&time, &fMan->slabTimeout);
kbo->timeFreed = time;
/* Cached and uncached BOs are kept on separate reuse lists. */
if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
DRMLISTADD(&kbo->head, &fMan->cached);
else
DRMLISTADD(&kbo->head, &fMan->unCached);
DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
/* Opportunistically reap anything that has already expired. */
driFreeTimeoutKBOsLocked(fMan, &time);
_glthread_UNLOCK_MUTEX(fMan->mutex);
}
/*
* Get a _DriKernelBO for us to use as storage for a slab.
*
*/
/*
 * Get a _DriKernelBO for us to use as storage for a slab.
 *
 * First tries to reuse a cached BO of exactly the right size and a
 * compatible page alignment from the free slab manager, converting its
 * placement flags via drmBOSetStatus if they differ. Falls back to
 * creating (and pre-faulting via a map/unmap cycle) a fresh kernel BO.
 * Returns NULL on failure.
 */
static struct _DriKernelBO *
driAllocKernelBO(struct _DriSlabSizeHeader *header)
{
struct _DriSlabPool *slabPool = header->slabPool;
struct _DriFreeSlabManager *fMan = slabPool->fMan;
drmMMListHead *list, *next, *head;
uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
struct _DriKernelBO *kbo;
struct _DriKernelBO *kboTmp;
int ret;
/*
* FIXME: We should perhaps allow some variation in slabsize in order
* to efficiently reuse slabs.
*/
/* Clamp to the max slab size, then round up to a whole page. */
size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
_glthread_LOCK_MUTEX(fMan->mutex);
kbo = NULL;
retry:
head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
&fMan->cached : &fMan->unCached;
for (list = head->next, next = list->next;
list != head;
list = next, next = list->next) {
kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head);
if ((kboTmp->bo.size == size) &&
(slabPool->pageAlignment == 0 ||
(kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
/* Remember the first size/alignment match as a fallback... */
if (!kbo)
kbo = kboTmp;
/* ...but prefer an exact flag match, which needs no conversion. */
if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0)
break;
}
}
if (kbo) {
/* Claim it: remove from both reuse and timeout lists. */
DRMLISTDELINIT(&kbo->head);
DRMLISTDELINIT(&kbo->timeoutHead);
}
_glthread_UNLOCK_MUTEX(fMan->mutex);
if (kbo) {
uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;
ret = 0;
if (new_mask) {
/* Convert placement/caching flags to what this pool wants. */
ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0);
}
if (ret == 0)
return kbo;
/* Conversion failed: discard this BO and look for another. */
driFreeKernelBO(kbo);
kbo = NULL;
goto retry;
}
kbo = calloc(1, sizeof(struct _DriKernelBO));
if (!kbo)
return NULL;
kbo->fd = slabPool->fd;
DRMINITLISTHEAD(&kbo->head);
DRMINITLISTHEAD(&kbo->timeoutHead);
ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
slabPool->proposedFlags,
DRM_BO_HINT_DONT_FENCE, &kbo->bo);
if (ret)
goto out_err0;
/* Map once to obtain (and cache) the virtual address, then unmap. */
ret = drmBOMap(kbo->fd, &kbo->bo,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
0, &kbo->virtual);
if (ret)
goto out_err1;
ret = drmBOUnmap(kbo->fd, &kbo->bo);
if (ret)
goto out_err1;
return kbo;
out_err1:
drmBOUnreference(kbo->fd, &kbo->bo);
out_err0:
free(kbo);
return NULL;
}
/*
 * Allocate a new slab for the given size bucket: grab a kernel BO,
 * carve it into bufSize-sized buffers, put all of them on the slab's
 * free list and link the slab into header->slabs. Returns 0 or a
 * negative errno. Caller holds header->mutex (see driSlabAllocBuffer).
 */
static int
driAllocSlab(struct _DriSlabSizeHeader *header)
{
struct _DriSlab *slab;
struct _DriSlabBuffer *buf;
uint32_t numBuffers;
int ret;
int i;
slab = calloc(1, sizeof(*slab));
if (!slab)
return -ENOMEM;
slab->kbo = driAllocKernelBO(header);
if (!slab->kbo) {
ret = -ENOMEM;
goto out_err0;
}
/* The BO may be larger than requested; use as much of it as fits. */
numBuffers = slab->kbo->bo.size / header->bufSize;
slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
if (!slab->buffers) {
ret = -ENOMEM;
goto out_err1;
}
DRMINITLISTHEAD(&slab->head);
DRMINITLISTHEAD(&slab->freeBuffers);
slab->numBuffers = numBuffers;
slab->numFree = 0;
slab->header = header;
buf = slab->buffers;
for (i=0; i < numBuffers; ++i) {
buf->parent = slab;
buf->start = i* header->bufSize;
buf->mapCount = 0;
buf->isSlabBuffer = 1;
_glthread_INIT_COND(buf->event);
DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
}
DRMLISTADDTAIL(&slab->head, &header->slabs);
return 0;
out_err1:
/* Return the BO to the reuse cache rather than destroying it. */
driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
free(slab->buffers);
out_err0:
free(slab);
return ret;
}
/*
* Delete a buffer from the slab header delayed list and put
* it on the slab free list.
*/
/*
 * Delete a buffer from the slab header delayed list and put
 * it on the slab free list.
 *
 * Called with header->mutex held. If the slab becomes entirely free it
 * is moved to header->freeSlabs; fully free slabs are then returned to
 * the kernel-BO cache unless this slab is the only source of buffers.
 */
static void
driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
{
struct _DriSlab *slab = buf->parent;
struct _DriSlabSizeHeader *header = slab->header;
drmMMListHead *list = &buf->head;
DRMLISTDEL(list);
DRMLISTADDTAIL(list, &slab->freeBuffers);
slab->numFree++;
/* Slab was fully allocated (unlinked): it has free space again. */
if (slab->head.next == &slab->head)
DRMLISTADDTAIL(&slab->head, &header->slabs);
if (slab->numFree == slab->numBuffers) {
list = &slab->head;
DRMLISTDEL(list);
DRMLISTADDTAIL(list, &header->freeSlabs);
}
/*
* NOTE(review): this condition reads "no partial slabs remain OR this
* slab is not fully free" as the trigger for reaping all free slabs;
* presumably the intent is to keep one free slab around only while it
* is the last source of buffers -- confirm against upstream history.
*/
if (header->slabs.next == &header->slabs ||
slab->numFree != slab->numBuffers) {
drmMMListHead *next;
struct _DriFreeSlabManager *fMan = header->slabPool->fMan;
for (list = header->freeSlabs.next, next = list->next;
list != &header->freeSlabs;
list = next, next = list->next) {
slab = DRMLISTENTRY(struct _DriSlab, list, head);
DRMLISTDELINIT(list);
/* BO goes back to the timeout cache; wrapper memory is freed. */
driSetKernelBOFree(fMan, slab->kbo);
free(slab->buffers);
free(slab);
}
}
}
/*
 * Scan the delayed-free list and return buffers whose fences have
 * signaled to their slabs' free lists. If "wait" is set, block on the
 * first examined fence. Called with header->mutex held.
 */
static void
driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
{
drmMMListHead *list, *prev, *first;
struct _DriSlabBuffer *buf;
struct _DriSlab *slab;
int firstWasSignaled = 1;
int signaled;
int i;
int ret;
/*
* Rerun the freeing test if the youngest tested buffer
* was signaled, since there might be more idle buffers
* in the delay list.
*/
while (firstWasSignaled) {
firstWasSignaled = 0;
signaled = 0;
first = header->delayedBuffers.next;
/* Only examine the oldest 1/3 of delayed buffers:
*/
if (header->numDelayed > 3) {
for (i = 0; i < header->numDelayed; i += 3) {
first = first->next;
}
}
/* Walk backwards (prev) so we visit oldest buffers first. */
for (list = first, prev = list->prev;
list != &header->delayedBuffers;
list = prev, prev = list->prev) {
buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
slab = buf->parent;
if (!signaled) {
if (wait) {
ret = driFenceFinish(buf->fence, buf->fenceType, 0);
if (ret)
break;
signaled = 1;
wait = 0;
} else {
signaled = driFenceSignaled(buf->fence, buf->fenceType);
}
if (signaled) {
if (list == first)
firstWasSignaled = 1;
driFenceUnReference(&buf->fence);
header->numDelayed--;
driSlabFreeBufferLocked(buf);
}
} else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
/*
* A newer fence signaled; older ones are likely done too --
* the cached check is cheap here.
*/
driFenceUnReference(&buf->fence);
header->numDelayed--;
driSlabFreeBufferLocked(buf);
}
}
}
}
/*
 * Take one buffer from a size bucket, growing the bucket with new slabs
 * if it is empty. Retries up to DRI_SLABPOOL_ALLOC_RETRIES times,
 * sleeping briefly between attempts, and returns NULL if no slab can
 * be allocated.
 *
 * Fix: "buf" was declared "static", silently sharing the local pointer
 * between threads/calls for no reason and racing under concurrent
 * allocation; it must be an ordinary automatic variable.
 */
static struct _DriSlabBuffer *
driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
{
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   drmMMListHead *list;
   int count = DRI_SLABPOOL_ALLOC_RETRIES;

   _glthread_LOCK_MUTEX(header->mutex);
   while (header->slabs.next == &header->slabs && count > 0) {
      /* First try to reclaim delayed buffers whose fences signaled. */
      driSlabCheckFreeLocked(header, 0);
      if (header->slabs.next != &header->slabs)
         break;

      _glthread_UNLOCK_MUTEX(header->mutex);
      if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         usleep(1);             /* brief back-off before retrying */
      _glthread_LOCK_MUTEX(header->mutex);
      (void) driAllocSlab(header);
      count--;
   }

   list = header->slabs.next;
   if (list == &header->slabs) {
      _glthread_UNLOCK_MUTEX(header->mutex);
      return NULL;
   }
   slab = DRMLISTENTRY(struct _DriSlab, list, head);
   if (--slab->numFree == 0)
      DRMLISTDELINIT(list);     /* slab exhausted: drop from partial list */

   list = slab->freeBuffers.next;
   DRMLISTDELINIT(list);

   _glthread_UNLOCK_MUTEX(header->mutex);
   buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
   return buf;
}
/*
 * Allocate a buffer from the slab pool. Requests that fit one of the
 * size buckets come from a slab; larger requests fall back to a
 * dedicated kernel BO wrapped in a non-slab _DriSlabBuffer. The
 * fallback BO is mapped/unmapped once to pre-fault and cache its
 * virtual address. Returns NULL on failure.
 */
static void *
pool_create(struct _DriBufferPool *driPool, unsigned long size,
uint64_t flags, unsigned hint, unsigned alignment)
{
struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
struct _DriSlabSizeHeader *header;
struct _DriSlabBuffer *buf;
void *dummy;
int i;
int ret;
/*
* FIXME: Check for compatibility.
*/
/* Buckets are sorted ascending: pick the first that fits. */
header = pool->headers;
for (i=0; i<pool->numBuckets; ++i) {
if (header->bufSize >= size)
break;
header++;
}
if (i < pool->numBuckets)
return driSlabAllocBuffer(header);
/*
* Fall back to allocate a buffer object directly from DRM.
* and wrap it in a driBO structure.
*/
buf = calloc(1, sizeof(*buf));
if (!buf)
return NULL;
buf->bo = calloc(1, sizeof(*buf->bo));
if (!buf->bo)
goto out_err0;
if (alignment) {
/* Alignment must divide or be divisible by the page size. */
if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
goto out_err1;
if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
goto out_err1;
}
ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
flags, hint, buf->bo);
if (ret)
goto out_err1;
/* Pre-fault and cache the virtual address. */
ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
0, &dummy);
if (ret)
goto out_err2;
ret = drmBOUnmap(pool->fd, buf->bo);
if (ret)
goto out_err2;
return buf;
out_err2:
drmBOUnreference(pool->fd, buf->bo);
out_err1:
free(buf->bo);
out_err0:
free(buf);
return NULL;
}
/*
 * Release a buffer back to the pool. Non-slab (fallback) buffers drop
 * their kernel BO immediately. Slab buffers whose fence has not yet
 * signaled go on the bucket's delayed-free list; otherwise they return
 * straight to the slab free list.
 */
static int
pool_destroy(struct _DriBufferPool *driPool, void *private)
{
struct _DriSlabBuffer *buf =
(struct _DriSlabBuffer *) private;
struct _DriSlab *slab;
struct _DriSlabSizeHeader *header;
if (!buf->isSlabBuffer) {
struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
int ret;
ret = drmBOUnreference(pool->fd, buf->bo);
free(buf->bo);
free(buf);
return ret;
}
slab = buf->parent;
header = slab->header;
_glthread_LOCK_MUTEX(header->mutex);
buf->unFenced = 0;
buf->mapCount = 0;
if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
/* Still busy on the GPU: defer the free until the fence signals. */
DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
header->numDelayed++;
} else {
if (buf->fence)
driFenceUnReference(&buf->fence);
driSlabFreeBufferLocked(buf);
}
_glthread_UNLOCK_MUTEX(header->mutex);
return 0;
}
/*
 * Wait for a buffer to become idle: first until it is fenced (the
 * buf->event condition is broadcast by pool_fence with the validation
 * mutex held by the caller), then until its fence finishes. The fence
 * reference is dropped once idle.
 */
static int
pool_waitIdle(struct _DriBufferPool *driPool, void *private,
_glthread_Mutex *mutex, int lazy)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
while(buf->unFenced)
_glthread_COND_WAIT(buf->event, *mutex);
if (!buf->fence)
return 0;
driFenceFinish(buf->fence, buf->fenceType, lazy);
driFenceUnReference(&buf->fence);
return 0;
}
/*
 * Map a buffer for CPU access. Busy buffers (unfenced or with an
 * unsignaled fence) are waited for unless DRM_BO_HINT_DONT_BLOCK is
 * set, in which case -EBUSY is returned. Slab buffers resolve to an
 * offset inside the slab's kernel BO mapping.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, _glthread_Mutex *mutex, void **virtual)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
int busy;
/* Slab buffers may use the cheap cached fence check; fallback BOs
* need the authoritative one. */
if (buf->isSlabBuffer)
busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
else
busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);
if (busy) {
if (hint & DRM_BO_HINT_DONT_BLOCK)
return -EBUSY;
else {
(void) pool_waitIdle(pool, private, mutex, 0);
}
}
++buf->mapCount;
*virtual = (buf->isSlabBuffer) ?
(void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
(void *) buf->bo->virtual;
return 0;
}
/*
 * Drop one map reference. When the last mapping of a slab buffer goes
 * away, wake anyone blocked in pool_validate waiting for mapCount == 0.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   --buf->mapCount;
   if (buf->isSlabBuffer && buf->mapCount == 0)
      _glthread_COND_BROADCAST(buf->event);

   return 0;
}
/*
 * GPU offset of a buffer. Only valid for buffers pinned with
 * DRM_BO_FLAG_NO_MOVE; slab buffers add their start offset within the
 * backing kernel BO.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
      return buf->bo->offset;
   }

   slab = buf->parent;
   header = slab->header;
   (void) header;               /* only used by the assert below */
   assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);

   return slab->kbo->bo.offset + buf->start;
}
/*
 * Offset of the buffer within its pool storage (the slab kernel BO).
 */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   return buf->start;
}
/*
 * Current placement flags: from the buffer's own BO for fallback
 * buffers, otherwise from the slab's backing kernel BO.
 */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (buf->isSlabBuffer)
      return buf->parent->kbo->bo.flags;

   return buf->bo->flags;
}
/*
 * Buffer size: bucket size for slab buffers, BO size for fallback
 * buffers.
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (buf->isSlabBuffer)
      return buf->parent->header->bufSize;

   return buf->bo->size;
}
/*
 * Attach a fence to a buffer after command submission. Replaces any
 * previous fence, records the fence types from the relevant kernel BO,
 * clears the unFenced state and wakes waiters (pool_waitIdle /
 * pool_validate) blocked on buf->event.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
struct _DriFenceObject *fence)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
drmBO *bo;
if (buf->fence)
driFenceUnReference(&buf->fence);
buf->fence = driFenceReference(fence);
bo = (buf->isSlabBuffer) ?
&buf->parent->kbo->bo:
buf->bo;
buf->fenceType = bo->fenceFlags;
buf->unFenced = 0;
_glthread_COND_BROADCAST(buf->event);
return 0;
}
/*
 * Kernel BO backing this buffer: the slab's shared BO for slab buffers,
 * the dedicated BO otherwise. NULL-safe.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (buf == NULL)
      return NULL;

   if (buf->isSlabBuffer)
      return &buf->parent->kbo->bo;

   return buf->bo;
}
/*
 * Prepare a buffer for command submission. Slab buffers must not be
 * CPU-mapped while on the hardware, so wait (on buf->event, with the
 * caller-held validation mutex) until all mappings are gone, then mark
 * the buffer unfenced until pool_fence attaches the new fence.
 */
static int
pool_validate(struct _DriBufferPool *pool, void *private,
_glthread_Mutex *mutex)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
if (!buf->isSlabBuffer)
return 0;
while(buf->mapCount != 0)
_glthread_COND_WAIT(buf->event, *mutex);
buf->unFenced = 1;
return 0;
}
/*
 * Create the free-slab manager shared by all slab pools on a device.
 * Cached kernel BOs are kept for slabTimeoutMsec before being freed;
 * expiry sweeps run at most every checkIntervalMsec. Returns NULL on
 * allocation failure.
 */
struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
{
struct _DriFreeSlabManager *tmp;
tmp = calloc(1, sizeof(*tmp));
if (!tmp)
return NULL;
_glthread_INIT_MUTEX(tmp->mutex);
_glthread_LOCK_MUTEX(tmp->mutex);
/* Convert msec to a normalized timeval (usec in [0, 1000000)). */
tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
gettimeofday(&tmp->nextCheck, NULL);
driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
DRMINITLISTHEAD(&tmp->timeoutList);
DRMINITLISTHEAD(&tmp->unCached);
DRMINITLISTHEAD(&tmp->cached);
_glthread_UNLOCK_MUTEX(tmp->mutex);
return tmp;
}
/*
 * Tear down the free-slab manager. Forces a sweep with a time past the
 * next scheduled check so every cached kernel BO is freed, asserts all
 * lists are empty, and frees the manager. All pools using this manager
 * must already be taken down.
 */
void
driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
{
struct timeval time;
/* A time guaranteed to be after every entry's expiry stamp. */
time = fMan->nextCheck;
driTimeAdd(&time, &fMan->checkInterval);
_glthread_LOCK_MUTEX(fMan->mutex);
driFreeTimeoutKBOsLocked(fMan, &time);
_glthread_UNLOCK_MUTEX(fMan->mutex);
assert(fMan->timeoutList.next == &fMan->timeoutList);
assert(fMan->unCached.next == &fMan->unCached);
assert(fMan->cached.next == &fMan->cached);
free(fMan);
}
/*
 * Initialize one size bucket of a slab pool: empty slab / free-slab /
 * delayed-buffer lists, back pointer to the pool and the bucket's
 * buffer size.
 */
static void
driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
		  struct _DriSlabSizeHeader *header)
{
   _glthread_INIT_MUTEX(header->mutex);
   _glthread_LOCK_MUTEX(header->mutex);

   DRMINITLISTHEAD(&header->slabs);
   DRMINITLISTHEAD(&header->freeSlabs);
   DRMINITLISTHEAD(&header->delayedBuffers);

   header->numDelayed = 0;
   header->bufSize = size;
   header->slabPool = pool;

   _glthread_UNLOCK_MUTEX(header->mutex);
}
/*
 * Drain one size bucket at pool teardown: synchronously finish and
 * release the fence of every delayed buffer, then return each buffer
 * to its slab (which ultimately recycles the backing kernel BOs).
 */
static void
driFinishSizeHeader(struct _DriSlabSizeHeader *header)
{
drmMMListHead *list, *next;
struct _DriSlabBuffer *buf;
_glthread_LOCK_MUTEX(header->mutex);
for (list = header->delayedBuffers.next, next = list->next;
list != &header->delayedBuffers;
list = next, next = list->next) {
buf = DRMLISTENTRY(struct _DriSlabBuffer, list , head);
if (buf->fence) {
(void) driFenceFinish(buf->fence, buf->fenceType, 0);
driFenceUnReference(&buf->fence);
}
header->numDelayed--;
driSlabFreeBufferLocked(buf);
}
_glthread_UNLOCK_MUTEX(header->mutex);
}
/*
 * Destroy the slab pool: drain every size bucket, then free the
 * bucket arrays and both pool objects.
 */
static void
pool_takedown(struct _DriBufferPool *driPool)
{
   struct _DriSlabPool *pool = driPool->data;
   int bucket;

   for (bucket = 0; bucket < pool->numBuckets; ++bucket)
      driFinishSizeHeader(&pool->headers[bucket]);

   free(pool->headers);
   free(pool->bucketSizes);
   free(pool);
   free(driPool);
}
/*
 * Create a slab buffer pool on DRM fd "fd". Buffers are served from
 * numSizes power-of-two buckets starting at smallestSize; "flags" are
 * the proposed placement flags for slab storage, desiredNumBuffers and
 * maxSlabSize control slab sizing, and fMan is the shared free-slab
 * manager used to cache retired kernel BOs. Returns NULL on failure.
 */
struct _DriBufferPool *
driSlabPoolInit(int fd, uint64_t flags,
uint64_t validMask,
uint32_t smallestSize,
uint32_t numSizes,
uint32_t desiredNumBuffers,
uint32_t maxSlabSize,
uint32_t pageAlignment,
struct _DriFreeSlabManager *fMan)
{
struct _DriBufferPool *driPool;
struct _DriSlabPool *pool;
uint32_t i;
driPool = calloc(1, sizeof(*driPool));
if (!driPool)
return NULL;
pool = calloc(1, sizeof(*pool));
if (!pool)
goto out_err0;
pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
if (!pool->bucketSizes)
goto out_err1;
pool->headers = calloc(numSizes, sizeof(*pool->headers));
if (!pool->headers)
goto out_err2;
pool->fMan = fMan;
pool->proposedFlags = flags;
pool->validMask = validMask;
pool->numBuckets = numSizes;
pool->pageSize = getpagesize();
pool->fd = fd;
pool->pageAlignment = pageAlignment;
pool->maxSlabSize = maxSlabSize;
pool->desiredNumBuffers = desiredNumBuffers;
/* Bucket i serves buffers up to smallestSize << i bytes. */
for (i=0; i<pool->numBuckets; ++i) {
pool->bucketSizes[i] = (smallestSize << i);
driInitSizeHeader(pool, pool->bucketSizes[i],
&pool->headers[i]);
}
driPool->data = (void *) pool;
driPool->map = &pool_map;
driPool->unmap = &pool_unmap;
driPool->destroy = &pool_destroy;
driPool->offset = &pool_offset;
driPool->poolOffset = &pool_poolOffset;
driPool->flags = &pool_flags;
driPool->size = &pool_size;
driPool->create = &pool_create;
driPool->fence = &pool_fence;
driPool->kernel = &pool_kernel;
driPool->validate = &pool_validate;
driPool->waitIdle = &pool_waitIdle;
driPool->takeDown = &pool_takedown;
return driPool;
out_err2:
free(pool->bucketSizes);
out_err1:
free(pool);
out_err0:
free(driPool);
return NULL;
}
+44 -4
View File
@@ -116,9 +116,49 @@ typedef pthread_mutex_t _glthread_Mutex;
#define _glthread_UNLOCK_MUTEX(name) \
(void) pthread_mutex_unlock(&(name))
#endif /* PTHREADS */
typedef pthread_cond_t _glthread_Cond;
#define _glthread_DECLARE_STATIC_COND(name) \
static _glthread_Cond name = PTHREAD_COND_INITIALIZER
#define _glthread_INIT_COND(cond) \
pthread_cond_init(&(cond), NULL)
#define _glthread_DESTROY_COND(name) \
pthread_cond_destroy(&(name))
#define _glthread_COND_WAIT(cond, mutex) \
pthread_cond_wait(&(cond), &(mutex))
#define _glthread_COND_SIGNAL(cond) \
pthread_cond_signal(&(cond))
#define _glthread_COND_BROADCAST(cond) \
pthread_cond_broadcast(&(cond))
#else /* PTHREADS */
typedef unsigned int _glthread_Cond;
#define _glthread_DECLARE_STATIC_COND(name) \
// #warning Condition variables not implemented.
#define _glthread_INIT_COND(cond) \
abort();
#define _glthread_DESTROY_COND(name) \
abort();
#define _glthread_COND_WAIT(cond, mutex) \
abort();
#define _glthread_COND_SIGNAL(cond) \
abort();
#define _glthread_COND_BROADCAST(cond) \
abort();
#endif
/*
@@ -259,11 +299,11 @@ typedef benaphore _glthread_Mutex;
* THREADS not defined
*/
typedef unsigned _glthread_TSD;
typedef GLuint _glthread_TSD;
typedef unsigned _glthread_Thread;
typedef GLuint _glthread_Thread;
typedef unsigned _glthread_Mutex;
typedef GLuint _glthread_Mutex;
#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0