Fix and hook up bufmgr code to the build.
parent 6a9eb08a87
commit c4857429c7

configure.ac | 68

@@ -35,9 +35,77 @@ AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)


dnl ===========================================================================
dnl check compiler flags
AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
AC_MSG_CHECKING([whether $CC supports $1])

libdrm_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $1"

AC_COMPILE_IFELSE([ ], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
CFLAGS="$libdrm_save_CFLAGS"

if test "x$libdrm_cc_flag" = "xyes"; then
ifelse([$2], , :, [$2])
else
ifelse([$3], , :, [$3])
fi
AC_MSG_RESULT([$libdrm_cc_flag])
])

dnl Use lots of warning flags with with gcc and compatible compilers

dnl Note: if you change the following variable, the cache is automatically
dnl skipped and all flags rechecked. So there's no need to do anything
dnl else. If for any reason you need to force a recheck, just change
dnl MAYBE_WARN in an ignorable way (like adding whitespace)

MAYBE_WARN="-Wall -Wextra \
-Wsign-compare -Werror-implicit-function-declaration \
-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
-Wpacked -Wswitch-enum -Wmissing-format-attribute \
-Wstrict-aliasing=2 -Winit-self -Wunsafe-loop-optimizations \
-Wdeclaration-after-statement -Wold-style-definition \
-Wno-missing-field-initializers -Wno-unused-parameter \
-Wno-attributes -Wno-long-long -Winline"

# invalidate cached value if MAYBE_WARN has changed
if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
unset libdrm_cv_warn_cflags
fi
AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
echo
WARN_CFLAGS=""

# Some warning options are not supported by all versions of
# gcc, so test all desired options against the current
# compiler.
#
# Note that there are some order dependencies
# here. Specifically, an option that disables a warning will
# have no net effect if a later option then enables that
# warnings, (perhaps implicitly). So we put some grouped
# options (-Wall and -Wextra) up front and the -Wno options
# last.

for W in $MAYBE_WARN; do
LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
done

libdrm_cv_warn_cflags=$WARN_CFLAGS
libdrm_cv_warn_maybe=$MAYBE_WARN

AC_MSG_CHECKING([which warning flags were supported])])
WARN_CFLAGS="$libdrm_cv_warn_cflags"

AC_SUBST(WARN_CFLAGS)
AC_OUTPUT([
Makefile
libdrm/Makefile
libdrm/intel/Makefile
shared-core/Makefile
tests/Makefile
libdrm.pc])

@@ -18,14 +18,17 @@
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

SUBDIRS = intel

libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined

AM_CFLAGS = -I$(top_srcdir)/shared-core
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c
libdrm_la_DEPENDENCIES = intel/libdrm_intel.la

libdrmincludedir = ${includedir}
libdrminclude_HEADERS = xf86drm.h xf86mm.h
libdrminclude_HEADERS = xf86drm.h xf86mm.h dri_bufmgr.h

EXTRA_DIST = ChangeLog TODO

@ -28,7 +28,6 @@
|
|||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
#include "mtypes.h"
|
||||
#include "dri_bufmgr.h"
|
||||
|
||||
/** @file dri_bufmgr.c
|
||||
|
@ -38,29 +37,9 @@
|
|||
|
||||
dri_bo *
|
||||
dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
|
||||
unsigned int alignment, uint64_t location_mask)
|
||||
unsigned int alignment)
|
||||
{
|
||||
assert((location_mask & ~(DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT |
|
||||
DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_PRIV0 |
|
||||
DRM_BO_FLAG_MEM_PRIV1 | DRM_BO_FLAG_MEM_PRIV2 |
|
||||
DRM_BO_FLAG_MEM_PRIV3 | DRM_BO_FLAG_MEM_PRIV4 |
|
||||
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED)) == 0);
|
||||
return bufmgr->bo_alloc(bufmgr, name, size, alignment, location_mask);
|
||||
}
|
||||
|
||||
dri_bo *
|
||||
dri_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, unsigned long offset,
|
||||
unsigned long size, void *virtual,
|
||||
uint64_t location_mask)
|
||||
{
|
||||
assert((location_mask & ~(DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT |
|
||||
DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_PRIV0 |
|
||||
DRM_BO_FLAG_MEM_PRIV1 | DRM_BO_FLAG_MEM_PRIV2 |
|
||||
DRM_BO_FLAG_MEM_PRIV3 |
|
||||
DRM_BO_FLAG_MEM_PRIV4)) == 0);
|
||||
|
||||
return bufmgr->bo_alloc_static(bufmgr, name, offset, size, virtual,
|
||||
location_mask);
|
||||
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -79,7 +58,7 @@ dri_bo_unreference(dri_bo *bo)
|
|||
}
|
||||
|
||||
int
|
||||
dri_bo_map(dri_bo *buf, GLboolean write_enable)
|
||||
dri_bo_map(dri_bo *buf, int write_enable)
|
||||
{
|
||||
return buf->bufmgr->bo_map(buf, write_enable);
|
||||
}
|
||||
|
@ -100,7 +79,7 @@ dri_bo_subdata(dri_bo *bo, unsigned long offset,
|
|||
if (size == 0 || data == NULL)
|
||||
return 0;
|
||||
|
||||
ret = dri_bo_map(bo, GL_TRUE);
|
||||
ret = dri_bo_map(bo, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
memcpy((unsigned char *)bo->virtual + offset, data, size);
|
||||
|
@ -119,7 +98,7 @@ dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
|
|||
if (size == 0 || data == NULL)
|
||||
return 0;
|
||||
|
||||
ret = dri_bo_map(bo, GL_FALSE);
|
||||
ret = dri_bo_map(bo, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
memcpy(data, (unsigned char *)bo->virtual + offset, size);
|
||||
|
@ -139,15 +118,6 @@ dri_bufmgr_destroy(dri_bufmgr *bufmgr)
|
|||
bufmgr->destroy(bufmgr);
|
||||
}
|
||||
|
||||
|
||||
int dri_emit_reloc(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target_buf)
|
||||
{
|
||||
return reloc_buf->bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
|
||||
delta, offset, target_buf);
|
||||
}
|
||||
|
||||
void *dri_process_relocs(dri_bo *batch_buf)
|
||||
{
|
||||
return batch_buf->bufmgr->process_relocs(batch_buf);
|
||||
|
@ -159,7 +129,7 @@ void dri_post_submit(dri_bo *batch_buf)
|
|||
}
|
||||
|
||||
void
|
||||
dri_bufmgr_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug)
|
||||
dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
|
||||
{
|
||||
bufmgr->debug = enable_debug;
|
||||
}
|
||||
|
|
|
@ -75,18 +75,7 @@ struct _dri_bufmgr {
|
|||
* to be used from the graphics device.
|
||||
*/
|
||||
dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
|
||||
unsigned long size, unsigned int alignment,
|
||||
uint64_t location_mask);
|
||||
|
||||
/**
|
||||
* Allocates a buffer object for a static allocation.
|
||||
*
|
||||
* Static allocations are ones such as the front buffer that are offered by
|
||||
* the X Server, which are never evicted and never moved.
|
||||
*/
|
||||
dri_bo *(*bo_alloc_static)(dri_bufmgr *bufmgr_ctx, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual, uint64_t location_mask);
|
||||
unsigned long size, unsigned int alignment);
|
||||
|
||||
/** Takes a reference on a buffer object */
|
||||
void (*bo_reference)(dri_bo *bo);
|
||||
|
@ -104,7 +93,7 @@ struct _dri_bufmgr {
|
|||
* buffer to complete, first. The resulting mapping is available at
|
||||
* buf->virtual.
|
||||
*/
|
||||
int (*bo_map)(dri_bo *buf, GLboolean write_enable);
|
||||
int (*bo_map)(dri_bo *buf, int write_enable);
|
||||
|
||||
/** Reduces the refcount on the userspace mapping of the buffer object. */
|
||||
int (*bo_unmap)(dri_bo *buf);
|
||||
|
@ -140,31 +129,6 @@ struct _dri_bufmgr {
|
|||
*/
|
||||
void (*destroy)(dri_bufmgr *bufmgr);
|
||||
|
||||
/**
|
||||
* Add relocation entry in reloc_buf, which will be updated with the
|
||||
* target buffer's real offset on on command submission.
|
||||
*
|
||||
* Relocations remain in place for the lifetime of the buffer object.
|
||||
*
|
||||
* \param reloc_buf Buffer to write the relocation into.
|
||||
* \param flags BO flags to be used in validating the target buffer.
|
||||
* Applicable flags include:
|
||||
* - DRM_BO_FLAG_READ: The buffer will be read in the process of
|
||||
* command execution.
|
||||
* - DRM_BO_FLAG_WRITE: The buffer will be written in the process of
|
||||
* command execution.
|
||||
* - DRM_BO_FLAG_MEM_TT: The buffer should be validated in TT memory.
|
||||
* - DRM_BO_FLAG_MEM_VRAM: The buffer should be validated in video
|
||||
* memory.
|
||||
* \param delta Constant value to be added to the relocation target's offset.
|
||||
* \param offset Byte offset within batch_buf of the relocated pointer.
|
||||
* \param target Buffer whose offset should be written into the relocation
|
||||
* entry.
|
||||
*/
|
||||
int (*emit_reloc)(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target);
|
||||
|
||||
/**
|
||||
* Processes the relocations, either in userland or by converting the list
|
||||
* for use in batchbuffer submission.
|
||||
|
@ -183,17 +147,14 @@ struct _dri_bufmgr {
|
|||
void (*post_submit)(dri_bo *batch_buf);
|
||||
|
||||
int (*check_aperture_space)(dri_bo *bo);
|
||||
GLboolean debug; /**< Enables verbose debugging printouts */
|
||||
int debug; /**< Enables verbose debugging printouts */
|
||||
};
|
||||
|
||||
dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
|
||||
unsigned int alignment, uint64_t location_mask);
|
||||
dri_bo *dri_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual, uint64_t location_mask);
|
||||
unsigned int alignment);
|
||||
void dri_bo_reference(dri_bo *bo);
|
||||
void dri_bo_unreference(dri_bo *bo);
|
||||
int dri_bo_map(dri_bo *buf, GLboolean write_enable);
|
||||
int dri_bo_map(dri_bo *buf, int write_enable);
|
||||
int dri_bo_unmap(dri_bo *buf);
|
||||
|
||||
int dri_bo_subdata(dri_bo *bo, unsigned long offset,
|
||||
|
@ -202,12 +163,9 @@ int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
|
|||
unsigned long size, void *data);
|
||||
void dri_bo_wait_rendering(dri_bo *bo);
|
||||
|
||||
void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug);
|
||||
void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
|
||||
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
|
||||
|
||||
int dri_emit_reloc(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target_buf);
|
||||
void *dri_process_relocs(dri_bo *batch_buf);
|
||||
void dri_post_process_relocs(dri_bo *batch_buf);
|
||||
void dri_post_submit(dri_bo *batch_buf);
|
|
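The generic interface above loses its location_mask arguments and its GLboolean/GLuint types. A minimal caller-side sketch of the reworked API, assuming the signatures in dri_bufmgr.h above; the helper name, the "upload" buffer name, the 4096 alignment and the error handling are illustrative, not part of the commit:

    #include <string.h>
    #include "dri_bufmgr.h"

    /* Hypothetical helper: upload "size" bytes into a new buffer object
     * using only the generic dri_bufmgr interface. */
    static dri_bo *
    upload_data(dri_bufmgr *bufmgr, const void *data, unsigned long size)
    {
        /* location_mask is gone; placement is now the bufmgr's problem. */
        dri_bo *bo = dri_bo_alloc(bufmgr, "upload", size, 4096);

        if (bo == NULL)
            return NULL;

        if (dri_bo_map(bo, 1) != 0) {   /* write_enable is a plain int now */
            dri_bo_unreference(bo);
            return NULL;
        }
        memcpy(bo->virtual, data, size);
        dri_bo_unmap(bo);
        return bo;
    }
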
@ -0,0 +1,38 @@
|
|||
# Copyright © 2008 Intel Corporation
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the next
|
||||
# paragraph) shall be included in all copies or substantial portions of the
|
||||
# Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
# IN THE SOFTWARE.
|
||||
#
|
||||
# Authors:
|
||||
# Eric Anholt <eric@anholt.net>
|
||||
|
||||
AM_CFLAGS = \
|
||||
$(WARN_CFLAGS) \
|
||||
-I$(top_srcdir)/shared-core
|
||||
|
||||
noinst_LTLIBRARIES = libdrm_intel.la
|
||||
|
||||
libdrm_intel_la_SOURCES = \
|
||||
intel_bufmgr_fake.c \
|
||||
intel_bufmgr_gem.c \
|
||||
mm.c \
|
||||
mm.h
|
||||
|
||||
libdrm_intelincludedir = ${includedir}
|
||||
libdrm_intelinclude_HEADERS = intel_bufmgr.h
|
|
@ -0,0 +1,94 @@
|
|||
/*
|
||||
* Copyright © 2008 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eric Anholt <eric@anholt.net>
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file intel_bufmgr.h
|
||||
*
|
||||
* Public definitions of Intel-specific bufmgr functions.
|
||||
*/
|
||||
|
||||
#ifndef INTEL_BUFMGR_GEM_H
|
||||
#define INTEL_BUFMGR_GEM_H
|
||||
|
||||
#include "dri_bufmgr.h"
|
||||
|
||||
/**
|
||||
* Intel-specific bufmgr bits that follow immediately after the
|
||||
* generic bufmgr structure.
|
||||
*/
|
||||
struct intel_bufmgr {
|
||||
/**
|
||||
* Add relocation entry in reloc_buf, which will be updated with the
|
||||
* target buffer's real offset on on command submission.
|
||||
*
|
||||
* Relocations remain in place for the lifetime of the buffer object.
|
||||
*
|
||||
* \param reloc_buf Buffer to write the relocation into.
|
||||
* \param read_domains GEM read domains which the buffer will be read into
|
||||
* by the command that this relocation is part of.
|
||||
* \param write_domains GEM read domains which the buffer will be dirtied
|
||||
* in by the command that this relocation is part of.
|
||||
* \param delta Constant value to be added to the relocation target's
|
||||
* offset.
|
||||
* \param offset Byte offset within batch_buf of the relocated pointer.
|
||||
* \param target Buffer whose offset should be written into the relocation
|
||||
* entry.
|
||||
*/
|
||||
int (*emit_reloc)(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target);
|
||||
};
|
||||
|
||||
/* intel_bufmgr_gem.c */
|
||||
dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
|
||||
dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned int handle);
|
||||
void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
|
||||
|
||||
/* intel_bufmgr_fake.c */
|
||||
dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
|
||||
unsigned long size,
|
||||
unsigned int (*fence_emit)(void *private),
|
||||
int (*fence_wait)(void *private,
|
||||
unsigned int cookie),
|
||||
void *driver_priv);
|
||||
dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual);
|
||||
|
||||
void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
|
||||
void intel_bo_fake_disable_backing_store(dri_bo *bo,
|
||||
void (*invalidate_cb)(dri_bo *bo,
|
||||
void *ptr),
|
||||
void *ptr);
|
||||
|
||||
int intel_bo_emit_reloc(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target_buf);
|
||||
|
||||
#endif /* INTEL_BUFMGR_GEM_H */
|
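The intel_bufmgr struct is laid out directly after the generic dri_bufmgr, which is how intel_bo_emit_reloc() finds it (bufmgr + 1) in intel_bufmgr_gem.c below. A rough sketch of the GEM path built from the declarations in intel_bufmgr.h; the batch size and the domain values are picked purely for illustration:

    #include "intel_bufmgr.h"
    #include "i915_drm.h"

    static dri_bufmgr *
    setup_gem_bufmgr(int fd)
    {
        /* Second argument is the batchbuffer size the driver plans to use. */
        dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 16 * 1024);

        if (bufmgr != NULL)
            intel_bufmgr_gem_enable_reuse(bufmgr);  /* optional BO reuse cache */
        return bufmgr;
    }

    /* Relocations now go through the Intel-specific entry point with GEM
     * read/write domains instead of DRM_BO flags. */
    static int
    emit_surface_reloc(dri_bo *batch, uint32_t batch_offset, dri_bo *target)
    {
        return intel_bo_emit_reloc(batch,
                                   I915_GEM_DOMAIN_RENDER, /* read_domains */
                                   0,                      /* write_domain */
                                   0,                      /* delta */
                                   batch_offset, target);
    }
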
@ -33,19 +33,19 @@
|
|||
* programming interface, but is more expressive and avoids many of
|
||||
* the bugs in the old texture manager.
|
||||
*/
|
||||
#include "mtypes.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include "dri_bufmgr.h"
|
||||
#include "intel_bufmgr_fake.h"
|
||||
#include "intel_bufmgr.h"
|
||||
#include "drm.h"
|
||||
#include "i915_drm.h"
|
||||
|
||||
#include "simple_list.h"
|
||||
#include "mm.h"
|
||||
#include "imports.h"
|
||||
|
||||
#define DBG(...) do { \
|
||||
if (bufmgr_fake->bufmgr.debug) \
|
||||
_mesa_printf(__VA_ARGS__); \
|
||||
drmMsg(__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
/* Internal flags:
|
||||
|
@ -67,11 +67,11 @@ struct fake_buffer_reloc
|
|||
/** Buffer object that the relocation points at. */
|
||||
dri_bo *target_buf;
|
||||
/** Offset of the relocation entry within reloc_buf. */
|
||||
GLuint offset;
|
||||
uint32_t offset;
|
||||
/** Cached value of the offset when we last performed this relocation. */
|
||||
GLuint last_target_offset;
|
||||
uint32_t last_target_offset;
|
||||
/** Value added to target_buf's offset to get the relocation entry. */
|
||||
GLuint delta;
|
||||
uint32_t delta;
|
||||
/** Cache domains the target buffer is read into. */
|
||||
uint32_t read_domains;
|
||||
/** Cache domain the target buffer will have dirty cachelines in. */
|
||||
|
@ -102,25 +102,36 @@ struct block {
|
|||
|
||||
typedef struct _bufmgr_fake {
|
||||
dri_bufmgr bufmgr;
|
||||
struct intel_bufmgr intel_bufmgr;
|
||||
|
||||
unsigned long low_offset;
|
||||
unsigned long size;
|
||||
void *virtual;
|
||||
|
||||
struct mem_block *heap;
|
||||
struct block lru; /* only allocated, non-fence-pending blocks here */
|
||||
|
||||
unsigned buf_nr; /* for generating ids */
|
||||
|
||||
struct block on_hardware; /* after bmValidateBuffers */
|
||||
struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
|
||||
/**
|
||||
* List of blocks which are currently in the GART but haven't been
|
||||
* fenced yet.
|
||||
*/
|
||||
struct block on_hardware;
|
||||
/**
|
||||
* List of blocks which are in the GART and have an active fence on them.
|
||||
*/
|
||||
struct block fenced;
|
||||
/**
|
||||
* List of blocks which have an expired fence and are ready to be evicted.
|
||||
*/
|
||||
struct block lru;
|
||||
/* then to bufmgr->lru or free() */
|
||||
|
||||
unsigned int last_fence;
|
||||
|
||||
unsigned fail:1;
|
||||
unsigned need_fence:1;
|
||||
GLboolean thrashing;
|
||||
int thrashing;
|
||||
|
||||
/**
|
||||
* Driver callback to emit a fence, returning the cookie.
|
||||
|
@ -134,9 +145,9 @@ typedef struct _bufmgr_fake {
|
|||
/** Driver-supplied argument to driver callbacks */
|
||||
void *driver_priv;
|
||||
|
||||
GLboolean debug;
|
||||
int debug;
|
||||
|
||||
GLboolean performed_rendering;
|
||||
int performed_rendering;
|
||||
|
||||
/* keep track of the current total size of objects we have relocs for */
|
||||
unsigned long current_total_size;
|
||||
|
@ -163,12 +174,12 @@ typedef struct _dri_bo_fake {
|
|||
uint32_t write_domain;
|
||||
|
||||
unsigned int alignment;
|
||||
GLboolean is_static, validated;
|
||||
int is_static, validated;
|
||||
unsigned int map_count;
|
||||
|
||||
/** relocation list */
|
||||
struct fake_buffer_reloc *relocs;
|
||||
GLuint nr_relocs;
|
||||
int nr_relocs;
|
||||
|
||||
struct block *block;
|
||||
void *backing_store;
|
||||
|
@ -183,18 +194,18 @@ static int dri_fake_check_aperture_space(dri_bo *bo);
|
|||
|
||||
#define MAXFENCE 0x7fffffff
|
||||
|
||||
static GLboolean FENCE_LTE( unsigned a, unsigned b )
|
||||
static int FENCE_LTE( unsigned a, unsigned b )
|
||||
{
|
||||
if (a == b)
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
|
||||
if (a < b && b - a < (1<<24))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
|
||||
if (a > b && MAXFENCE - a + b < (1<<24))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
|
||||
return GL_FALSE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
|
@ -211,14 +222,13 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
|
|||
|
||||
ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
|
||||
if (ret != 0) {
|
||||
_mesa_printf("%s:%d: Error %d waiting for fence.\n",
|
||||
__FILE__, __LINE__);
|
||||
drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__);
|
||||
abort();
|
||||
}
|
||||
clear_fenced(bufmgr_fake, cookie);
|
||||
}
|
||||
|
||||
static GLboolean
|
||||
static int
|
||||
_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
|
||||
{
|
||||
/* Slight problem with wrap-around:
|
||||
|
@ -229,39 +239,39 @@ _fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
|
|||
/**
|
||||
* Allocate a memory manager block for the buffer.
|
||||
*/
|
||||
static GLboolean
|
||||
static int
|
||||
alloc_block(dri_bo *bo)
|
||||
{
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
|
||||
dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
|
||||
struct block *block = (struct block *)calloc(sizeof *block, 1);
|
||||
unsigned int align_log2 = _mesa_ffs(bo_fake->alignment) - 1;
|
||||
GLuint sz;
|
||||
unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
|
||||
unsigned int sz;
|
||||
|
||||
if (!block)
|
||||
return GL_FALSE;
|
||||
return 1;
|
||||
|
||||
sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
|
||||
|
||||
block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
|
||||
block->mem = drmmmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
|
||||
if (!block->mem) {
|
||||
free(block);
|
||||
return GL_FALSE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
make_empty_list(block);
|
||||
DRMINITLISTHEAD(block);
|
||||
|
||||
/* Insert at head or at tail???
|
||||
*/
|
||||
insert_at_tail(&bufmgr_fake->lru, block);
|
||||
DRMLISTADDTAIL(block, &bufmgr_fake->lru);
|
||||
|
||||
block->virtual = bufmgr_fake->virtual +
|
||||
block->virtual = (uint8_t *)bufmgr_fake->virtual +
|
||||
block->mem->ofs - bufmgr_fake->low_offset;
|
||||
block->bo = bo;
|
||||
|
||||
bo_fake->block = block;
|
||||
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Release the card storage associated with buf:
|
||||
|
@ -289,9 +299,9 @@ static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
|
|||
}
|
||||
else {
|
||||
DBG(" - free immediately\n");
|
||||
remove_from_list(block);
|
||||
DRMLISTDEL(block);
|
||||
|
||||
mmFreeMem(block->mem);
|
||||
drmmmFreeMem(block->mem);
|
||||
free(block);
|
||||
}
|
||||
}
|
||||
|
@ -304,7 +314,7 @@ alloc_backing_store(dri_bo *bo)
|
|||
assert(!bo_fake->backing_store);
|
||||
assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
|
||||
|
||||
bo_fake->backing_store = ALIGN_MALLOC(bo->size, 64);
|
||||
bo_fake->backing_store = malloc(bo->size);
|
||||
|
||||
DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
|
||||
assert(bo_fake->backing_store);
|
||||
|
@ -317,7 +327,7 @@ free_backing_store(dri_bo *bo)
|
|||
|
||||
if (bo_fake->backing_store) {
|
||||
assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
|
||||
ALIGN_FREE(bo_fake->backing_store);
|
||||
free(bo_fake->backing_store);
|
||||
bo_fake->backing_store = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -337,14 +347,14 @@ set_dirty(dri_bo *bo)
|
|||
bo_fake->dirty = 1;
|
||||
}
|
||||
|
||||
static GLboolean
|
||||
evict_lru(dri_bufmgr_fake *bufmgr_fake, GLuint max_fence)
|
||||
static int
|
||||
evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
|
||||
{
|
||||
struct block *block, *tmp;
|
||||
|
||||
DBG("%s\n", __FUNCTION__);
|
||||
|
||||
foreach_s(block, tmp, &bufmgr_fake->lru) {
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
|
||||
|
||||
if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
|
||||
|
@ -357,23 +367,20 @@ evict_lru(dri_bufmgr_fake *bufmgr_fake, GLuint max_fence)
|
|||
bo_fake->block = NULL;
|
||||
|
||||
free_block(bufmgr_fake, block);
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return GL_FALSE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define foreach_s_rev(ptr, t, list) \
|
||||
for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
|
||||
|
||||
static GLboolean
|
||||
static int
|
||||
evict_mru(dri_bufmgr_fake *bufmgr_fake)
|
||||
{
|
||||
struct block *block, *tmp;
|
||||
|
||||
DBG("%s\n", __FUNCTION__);
|
||||
|
||||
foreach_s_rev(block, tmp, &bufmgr_fake->lru) {
|
||||
DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
|
||||
|
||||
if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
|
||||
|
@ -383,10 +390,10 @@ evict_mru(dri_bufmgr_fake *bufmgr_fake)
|
|||
bo_fake->block = NULL;
|
||||
|
||||
free_block(bufmgr_fake, block);
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return GL_FALSE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -398,7 +405,7 @@ static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
|
|||
struct block *block, *tmp;
|
||||
int ret = 0;
|
||||
|
||||
foreach_s(block, tmp, &bufmgr_fake->fenced) {
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
|
||||
assert(block->fenced);
|
||||
|
||||
if (_fence_test(bufmgr_fake, block->fence)) {
|
||||
|
@ -408,14 +415,15 @@ static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
|
|||
if (!block->bo) {
|
||||
DBG("delayed free: offset %x sz %x\n",
|
||||
block->mem->ofs, block->mem->size);
|
||||
remove_from_list(block);
|
||||
mmFreeMem(block->mem);
|
||||
DRMLISTDEL(block);
|
||||
drmmmFreeMem(block->mem);
|
||||
free(block);
|
||||
}
|
||||
else {
|
||||
DBG("return to lru: offset %x sz %x\n",
|
||||
block->mem->ofs, block->mem->size);
|
||||
move_to_tail(&bufmgr_fake->lru, block);
|
||||
DRMLISTDEL(block);
|
||||
DRMLISTADDTAIL(block, &bufmgr_fake->lru);
|
||||
}
|
||||
|
||||
ret = 1;
|
||||
|
@ -438,7 +446,7 @@ static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
|
|||
{
|
||||
struct block *block, *tmp;
|
||||
|
||||
foreach_s (block, tmp, &bufmgr_fake->on_hardware) {
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
|
||||
DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
|
||||
block->mem->size, block->mem->ofs, block->bo, fence);
|
||||
block->fence = fence;
|
||||
|
@ -448,13 +456,14 @@ static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
|
|||
|
||||
/* Move to tail of pending list here
|
||||
*/
|
||||
move_to_tail(&bufmgr_fake->fenced, block);
|
||||
DRMLISTDEL(block);
|
||||
DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
|
||||
}
|
||||
|
||||
assert(is_empty_list(&bufmgr_fake->on_hardware));
|
||||
assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
|
||||
}
|
||||
|
||||
static GLboolean evict_and_alloc_block(dri_bo *bo)
|
||||
static int evict_and_alloc_block(dri_bo *bo)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
|
||||
|
@ -464,7 +473,7 @@ static GLboolean evict_and_alloc_block(dri_bo *bo)
|
|||
/* Search for already free memory:
|
||||
*/
|
||||
if (alloc_block(bo))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
|
||||
/* If we're not thrashing, allow lru eviction to dig deeper into
|
||||
* recently used textures. We'll probably be thrashing soon:
|
||||
|
@ -472,7 +481,7 @@ static GLboolean evict_and_alloc_block(dri_bo *bo)
|
|||
if (!bufmgr_fake->thrashing) {
|
||||
while (evict_lru(bufmgr_fake, 0))
|
||||
if (alloc_block(bo))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Keep thrashing counter alive?
|
||||
|
@ -484,17 +493,17 @@ static GLboolean evict_and_alloc_block(dri_bo *bo)
|
|||
* freed memory that has been submitted to hardware and fenced to
|
||||
* become available:
|
||||
*/
|
||||
while (!is_empty_list(&bufmgr_fake->fenced)) {
|
||||
GLuint fence = bufmgr_fake->fenced.next->fence;
|
||||
while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
|
||||
uint32_t fence = bufmgr_fake->fenced.next->fence;
|
||||
_fence_wait_internal(bufmgr_fake, fence);
|
||||
|
||||
if (alloc_block(bo))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!is_empty_list(&bufmgr_fake->on_hardware)) {
|
||||
while (!is_empty_list(&bufmgr_fake->fenced)) {
|
||||
GLuint fence = bufmgr_fake->fenced.next->fence;
|
||||
if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
|
||||
while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
|
||||
uint32_t fence = bufmgr_fake->fenced.next->fence;
|
||||
_fence_wait_internal(bufmgr_fake, fence);
|
||||
}
|
||||
|
||||
|
@ -504,16 +513,16 @@ static GLboolean evict_and_alloc_block(dri_bo *bo)
|
|||
bufmgr_fake->thrashing = 20;
|
||||
|
||||
if (alloc_block(bo))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (evict_mru(bufmgr_fake))
|
||||
if (alloc_block(bo))
|
||||
return GL_TRUE;
|
||||
return 1;
|
||||
|
||||
DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
|
||||
|
||||
return GL_FALSE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
|
@ -555,7 +564,7 @@ dri_fake_bo_wait_rendering(dri_bo *bo)
|
|||
* -- and wait for idle
|
||||
*/
|
||||
void
|
||||
dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
|
||||
intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
|
||||
struct block *block, *tmp;
|
||||
|
@ -572,10 +581,10 @@ dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
|
|||
/* Check that we hadn't released the lock without having fenced the last
|
||||
* set of buffers.
|
||||
*/
|
||||
assert(is_empty_list(&bufmgr_fake->fenced));
|
||||
assert(is_empty_list(&bufmgr_fake->on_hardware));
|
||||
assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
|
||||
assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
|
||||
|
||||
foreach_s(block, tmp, &bufmgr_fake->lru) {
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
|
||||
assert(_fence_test(bufmgr_fake, block->fence));
|
||||
set_dirty(block->bo);
|
||||
}
|
||||
|
@ -583,8 +592,7 @@ dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
|
|||
|
||||
static dri_bo *
|
||||
dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment,
|
||||
uint64_t location_mask)
|
||||
unsigned long size, unsigned int alignment)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake;
|
||||
dri_bo_fake *bo_fake;
|
||||
|
@ -611,7 +619,7 @@ dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
|
|||
bo_fake->id = ++bufmgr_fake->buf_nr;
|
||||
bo_fake->name = name;
|
||||
bo_fake->flags = 0;
|
||||
bo_fake->is_static = GL_FALSE;
|
||||
bo_fake->is_static = 0;
|
||||
|
||||
DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
|
||||
bo_fake->bo.size / 1024);
|
||||
|
@ -619,10 +627,10 @@ dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
|
|||
return &bo_fake->bo;
|
||||
}
|
||||
|
||||
static dri_bo *
|
||||
dri_fake_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual, uint64_t location_mask)
|
||||
dri_bo *
|
||||
intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size,
|
||||
void *virtual)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake;
|
||||
dri_bo_fake *bo_fake;
|
||||
|
@ -643,7 +651,7 @@ dri_fake_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
|||
bo_fake->id = ++bufmgr_fake->buf_nr;
|
||||
bo_fake->name = name;
|
||||
bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
|
||||
bo_fake->is_static = GL_TRUE;
|
||||
bo_fake->is_static = 1;
|
||||
|
||||
DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
|
||||
bo_fake->bo.size / 1024);
|
||||
|
@ -692,10 +700,10 @@ dri_fake_bo_unreference(dri_bo *bo)
|
|||
* Set the buffer as not requiring backing store, and instead get the callback
|
||||
* invoked whenever it would be set dirty.
|
||||
*/
|
||||
void dri_bo_fake_disable_backing_store(dri_bo *bo,
|
||||
void (*invalidate_cb)(dri_bo *bo,
|
||||
void *ptr),
|
||||
void *ptr)
|
||||
void intel_bo_fake_disable_backing_store(dri_bo *bo,
|
||||
void (*invalidate_cb)(dri_bo *bo,
|
||||
void *ptr),
|
||||
void *ptr)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
|
||||
|
@ -723,7 +731,7 @@ void dri_bo_fake_disable_backing_store(dri_bo *bo,
|
|||
* BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
|
||||
*/
|
||||
static int
|
||||
dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
|
||||
dri_fake_bo_map(dri_bo *bo, int write_enable)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
|
||||
|
@ -744,7 +752,7 @@ dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
|
|||
bo_fake->bo.size / 1024);
|
||||
|
||||
if (bo->virtual != NULL) {
|
||||
_mesa_printf("%s: already mapped\n", __FUNCTION__);
|
||||
drmMsg("%s: already mapped\n", __FUNCTION__);
|
||||
abort();
|
||||
}
|
||||
else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
|
||||
|
@ -807,16 +815,16 @@ dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
|
|||
{
|
||||
struct block *block, *tmp;
|
||||
|
||||
bufmgr_fake->performed_rendering = GL_FALSE;
|
||||
bufmgr_fake->performed_rendering = 0;
|
||||
/* okay for ever BO that is on the HW kick it off.
|
||||
seriously not afraid of the POLICE right now */
|
||||
foreach_s(block, tmp, &bufmgr_fake->on_hardware) {
|
||||
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
|
||||
|
||||
block->on_hardware = 0;
|
||||
free_block(bufmgr_fake, block);
|
||||
bo_fake->block = NULL;
|
||||
bo_fake->validated = GL_FALSE;
|
||||
bo_fake->validated = 0;
|
||||
if (!(bo_fake->flags & BM_NO_BACKING_STORE))
|
||||
bo_fake->dirty = 1;
|
||||
}
|
||||
|
@ -890,9 +898,10 @@ dri_fake_bo_validate(dri_bo *bo)
|
|||
|
||||
bo_fake->block->fenced = 0;
|
||||
bo_fake->block->on_hardware = 1;
|
||||
move_to_tail(&bufmgr_fake->on_hardware, bo_fake->block);
|
||||
DRMLISTDEL(bo_fake->block);
|
||||
DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
|
||||
|
||||
bo_fake->validated = GL_TRUE;
|
||||
bo_fake->validated = 1;
|
||||
bufmgr_fake->need_fence = 1;
|
||||
|
||||
return 0;
|
||||
|
@ -915,7 +924,7 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
|
|||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
|
||||
|
||||
mmDestroy(bufmgr_fake->heap);
|
||||
drmmmDestroy(bufmgr_fake->heap);
|
||||
free(bufmgr);
|
||||
}
|
||||
|
||||
|
@ -1018,9 +1027,9 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
|
|||
reloc_data = r->target_buf->offset + r->delta;
|
||||
|
||||
if (bo->virtual == NULL)
|
||||
dri_bo_map(bo, GL_TRUE);
|
||||
dri_bo_map(bo, 1);
|
||||
|
||||
*(uint32_t *)(bo->virtual + r->offset) = reloc_data;
|
||||
*(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
|
||||
|
||||
r->last_target_offset = r->target_buf->offset;
|
||||
}
|
||||
|
@ -1036,7 +1045,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
|
|||
|
||||
bo_fake->card_dirty = 1;
|
||||
}
|
||||
bufmgr_fake->performed_rendering = GL_TRUE;
|
||||
bufmgr_fake->performed_rendering = 1;
|
||||
}
|
||||
|
||||
return dri_fake_bo_validate(bo);
|
||||
|
@ -1050,7 +1059,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
|
|||
int ret;
|
||||
int retry_count = 0;
|
||||
|
||||
bufmgr_fake->performed_rendering = GL_FALSE;
|
||||
bufmgr_fake->performed_rendering = 0;
|
||||
|
||||
dri_fake_calculate_domains(batch_buf);
|
||||
|
||||
|
@ -1066,7 +1075,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
|
|||
bufmgr_fake->fail = 0;
|
||||
goto restart;
|
||||
} else /* dump out the memory here */
|
||||
mmDumpMemInfo(bufmgr_fake->heap);
|
||||
drmmmDumpMemInfo(bufmgr_fake->heap);
|
||||
}
|
||||
|
||||
assert(ret == 0);
|
||||
|
@ -1095,7 +1104,7 @@ dri_bo_fake_post_submit(dri_bo *bo)
|
|||
}
|
||||
|
||||
assert(bo_fake->map_count == 0);
|
||||
bo_fake->validated = GL_FALSE;
|
||||
bo_fake->validated = 0;
|
||||
bo_fake->read_domains = 0;
|
||||
bo_fake->write_domain = 0;
|
||||
}
|
||||
|
@ -1114,7 +1123,7 @@ dri_fake_check_aperture_space(dri_bo *bo)
|
|||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
|
||||
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
|
||||
GLuint sz;
|
||||
unsigned int sz;
|
||||
|
||||
sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
|
||||
|
||||
|
@ -1133,40 +1142,39 @@ dri_fake_check_aperture_space(dri_bo *bo)
|
|||
}
|
||||
|
||||
dri_bufmgr *
|
||||
dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
|
||||
unsigned long size,
|
||||
unsigned int (*fence_emit)(void *private),
|
||||
int (*fence_wait)(void *private, unsigned int cookie),
|
||||
void *driver_priv)
|
||||
intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
|
||||
unsigned long size,
|
||||
unsigned int (*fence_emit)(void *private),
|
||||
int (*fence_wait)(void *private, unsigned int cookie),
|
||||
void *driver_priv)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake;
|
||||
|
||||
bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
|
||||
|
||||
/* Initialize allocator */
|
||||
make_empty_list(&bufmgr_fake->fenced);
|
||||
make_empty_list(&bufmgr_fake->on_hardware);
|
||||
make_empty_list(&bufmgr_fake->lru);
|
||||
DRMINITLISTHEAD(&bufmgr_fake->fenced);
|
||||
DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
|
||||
DRMINITLISTHEAD(&bufmgr_fake->lru);
|
||||
|
||||
bufmgr_fake->low_offset = low_offset;
|
||||
bufmgr_fake->virtual = low_virtual;
|
||||
bufmgr_fake->size = size;
|
||||
bufmgr_fake->heap = mmInit(low_offset, size);
|
||||
bufmgr_fake->heap = drmmmInit(low_offset, size);
|
||||
|
||||
/* Hook in methods */
|
||||
bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
|
||||
bufmgr_fake->bufmgr.bo_alloc_static = dri_fake_bo_alloc_static;
|
||||
bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
|
||||
bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
|
||||
bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
|
||||
bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
|
||||
bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
|
||||
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
|
||||
bufmgr_fake->bufmgr.emit_reloc = dri_fake_emit_reloc;
|
||||
bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
|
||||
bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
|
||||
bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
|
||||
bufmgr_fake->bufmgr.debug = GL_FALSE;
|
||||
bufmgr_fake->bufmgr.debug = 0;
|
||||
bufmgr_fake->intel_bufmgr.emit_reloc = dri_fake_emit_reloc;
|
||||
|
||||
bufmgr_fake->fence_emit = fence_emit;
|
||||
bufmgr_fake->fence_wait = fence_wait;
|
||||
|
|
|
@ -1,50 +0,0 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2007 Intel Corporation
|
||||
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
|
||||
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
|
||||
* Eric Anholt <eric@anholt.net>
|
||||
*/
|
||||
|
||||
#ifndef _INTEL_BUFMGR_FAKE_H_
|
||||
#define _INTEL_BUFMGR_FAKE_H_
|
||||
|
||||
void dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
|
||||
dri_bufmgr *dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
|
||||
unsigned long size,
|
||||
unsigned int (*fence_emit)(void *private),
|
||||
int (*fence_wait)(void *private,
|
||||
unsigned int cookie),
|
||||
void *driver_priv);
|
||||
void dri_bo_fake_disable_backing_store(dri_bo *bo,
|
||||
void (*invalidate_cb)(dri_bo *bo,
|
||||
void *ptr),
|
||||
void *ptr);
|
||||
#endif /* _INTEL_BUFMGR_FAKE_H_ */
|
||||
|
|
@ -44,15 +44,12 @@
|
|||
#include <sys/mman.h>
|
||||
|
||||
#include "errno.h"
|
||||
#include "mtypes.h"
|
||||
#include "dri_bufmgr.h"
|
||||
#include "intel_bufmgr.h"
|
||||
#include "string.h"
|
||||
#include "imports.h"
|
||||
|
||||
#include "i915_drm.h"
|
||||
|
||||
#include "intel_bufmgr_gem.h"
|
||||
|
||||
#define DBG(...) do { \
|
||||
if (bufmgr_gem->bufmgr.debug) \
|
||||
fprintf(stderr, __VA_ARGS__); \
|
||||
|
@ -89,9 +86,11 @@ struct dri_gem_bo_bucket {
|
|||
typedef struct _dri_bufmgr_gem {
|
||||
dri_bufmgr bufmgr;
|
||||
|
||||
struct intel_bufmgr intel_bufmgr;
|
||||
|
||||
int fd;
|
||||
|
||||
uint32_t max_relocs;
|
||||
int max_relocs;
|
||||
|
||||
struct drm_i915_gem_exec_object *exec_objects;
|
||||
dri_bo **exec_bos;
|
||||
|
@ -108,7 +107,8 @@ typedef struct _dri_bo_gem {
|
|||
dri_bo bo;
|
||||
|
||||
int refcount;
|
||||
GLboolean mapped;
|
||||
/** Boolean whether the mmap ioctl has been called for this buffer yet. */
|
||||
int mapped;
|
||||
uint32_t gem_handle;
|
||||
const char *name;
|
||||
|
||||
|
@ -119,11 +119,11 @@ typedef struct _dri_bo_gem {
|
|||
int validate_index;
|
||||
|
||||
/**
|
||||
* Tracks whether set_domain to CPU is current
|
||||
* Boolean whether set_domain to CPU is current
|
||||
* Set when set_domain has been called
|
||||
* Cleared when a batch has been submitted
|
||||
*/
|
||||
GLboolean cpu_domain_set;
|
||||
int cpu_domain_set;
|
||||
|
||||
/** Array passed to the DRM containing relocation information. */
|
||||
struct drm_i915_gem_relocation_entry *relocs;
|
||||
|
@ -138,8 +138,8 @@ typedef struct _dri_bo_gem {
|
|||
static int
|
||||
logbase2(int n)
|
||||
{
|
||||
GLint i = 1;
|
||||
GLint log2 = 0;
|
||||
int i = 1;
|
||||
int log2 = 0;
|
||||
|
||||
while (n > i) {
|
||||
i *= 2;
|
||||
|
@ -262,15 +262,14 @@ intel_setup_reloc_list(dri_bo *bo)
|
|||
|
||||
static dri_bo *
|
||||
dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment,
|
||||
uint64_t location_mask)
|
||||
unsigned long size, unsigned int alignment)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
|
||||
dri_bo_gem *bo_gem;
|
||||
unsigned int page_size = getpagesize();
|
||||
int ret;
|
||||
struct dri_gem_bo_bucket *bucket;
|
||||
GLboolean alloc_from_cache = GL_FALSE;
|
||||
int alloc_from_cache = 0;
|
||||
|
||||
bo_gem = calloc(1, sizeof(*bo_gem));
|
||||
if (!bo_gem)
|
||||
|
@ -338,18 +337,6 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
|
|||
return &bo_gem->bo;
|
||||
}
|
||||
|
||||
/* Our GEM backend doesn't allow creation of static buffers, as that requires
|
||||
* privelege for the non-fake case, and the lock in the fake case where we were
|
||||
* working around the X Server not creating buffers and passing handles to us.
|
||||
*/
|
||||
static dri_bo *
|
||||
dri_gem_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned long offset, unsigned long size, void *virtual,
|
||||
uint64_t location_mask)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a dri_bo wrapping the given buffer object handle.
|
||||
*
|
||||
|
@ -357,7 +344,7 @@ dri_gem_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
|
|||
* to another.
|
||||
*/
|
||||
dri_bo *
|
||||
intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
|
||||
intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
|
||||
unsigned int handle)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
|
||||
|
@ -465,7 +452,7 @@ dri_gem_bo_unreference(dri_bo *bo)
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_map(dri_bo *bo, GLboolean write_enable)
|
||||
dri_gem_bo_map(dri_bo *bo, int write_enable)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
|
@ -499,7 +486,7 @@ dri_gem_bo_map(dri_bo *bo, GLboolean write_enable)
|
|||
bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
|
||||
}
|
||||
bo->virtual = bo_gem->virtual;
|
||||
bo_gem->mapped = GL_TRUE;
|
||||
bo_gem->mapped = 1;
|
||||
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
|
||||
}
|
||||
|
||||
|
@ -514,7 +501,7 @@ dri_gem_bo_map(dri_bo *bo, GLboolean write_enable)
|
|||
bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
|
||||
strerror (errno));
|
||||
}
|
||||
bo_gem->cpu_domain_set = GL_TRUE;
|
||||
bo_gem->cpu_domain_set = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -763,7 +750,7 @@ dri_gem_post_submit(dri_bo *batch_buf)
|
|||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
|
||||
/* Need to call set_domain on next bo_map */
|
||||
bo_gem->cpu_domain_set = GL_FALSE;
|
||||
bo_gem->cpu_domain_set = 0;
|
||||
|
||||
/* Disconnect the buffer from the validate list */
|
||||
bo_gem->validate_index = -1;
|
||||
|
@ -781,7 +768,7 @@ dri_gem_post_submit(dri_bo *batch_buf)
|
|||
* in flight at once.
|
||||
*/
|
||||
void
|
||||
intel_gem_enable_bo_reuse(dri_bufmgr *bufmgr)
|
||||
intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
|
||||
int i;
|
||||
|
@ -824,7 +811,6 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
|||
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
|
||||
|
||||
bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
|
||||
bufmgr_gem->bufmgr.bo_alloc_static = dri_gem_bo_alloc_static;
|
||||
bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
|
||||
bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
|
||||
bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
|
||||
|
@ -833,11 +819,11 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
|||
bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
|
||||
bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
|
||||
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
|
||||
bufmgr_gem->bufmgr.emit_reloc = dri_gem_emit_reloc;
|
||||
bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
|
||||
bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
|
||||
bufmgr_gem->bufmgr.debug = GL_FALSE;
|
||||
bufmgr_gem->bufmgr.debug = 0;
|
||||
bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
|
||||
bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
|
||||
/* Initialize the linked lists for BO reuse cache. */
|
||||
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
|
||||
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
|
||||
|
@ -845,3 +831,15 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
|||
return &bufmgr_gem->bufmgr;
|
||||
}
|
||||
|
||||
int
|
||||
intel_bo_emit_reloc(dri_bo *reloc_buf,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target_buf)
|
||||
{
|
||||
struct intel_bufmgr *intel_bufmgr;
|
||||
|
||||
intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);
|
||||
|
||||
return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
|
||||
delta, offset, target_buf);
|
||||
}

@@ -1,16 +0,0 @@

#ifndef INTEL_BUFMGR_GEM_H
#define INTEL_BUFMGR_GEM_H

#include "dri_bufmgr.h"

extern dri_bo *intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr,
const char *name,
unsigned int handle);

dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);

void
intel_gem_enable_bo_reuse(dri_bufmgr *bufmgr);

#endif /* INTEL_BUFMGR_GEM_H */

@ -0,0 +1,281 @@
|
|||
/*
|
||||
* GLX Hardware Device Driver common code
|
||||
* Copyright (C) 1999 Wittawat Yamwong
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
|
||||
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include "xf86drm.h"
|
||||
#include "mm.h"
|
||||
|
||||
void
|
||||
drmmmDumpMemInfo(const struct mem_block *heap)
|
||||
{
|
||||
drmMsg("Memory heap %p:\n", (void *)heap);
|
||||
if (heap == 0) {
|
||||
drmMsg(" heap == 0\n");
|
||||
} else {
|
||||
const struct mem_block *p;
|
||||
|
||||
for(p = heap->next; p != heap; p = p->next) {
|
||||
drmMsg(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
|
||||
p->free ? 'F':'.',
|
||||
p->reserved ? 'R':'.');
|
||||
}
|
||||
|
||||
drmMsg("\nFree list:\n");
|
||||
|
||||
for(p = heap->next_free; p != heap; p = p->next_free) {
|
||||
drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
|
||||
p->free ? 'F':'.',
|
||||
p->reserved ? 'R':'.');
|
||||
}
|
||||
|
||||
}
|
||||
drmMsg("End of memory blocks\n");
|
||||
}
|
||||
|
||||
struct mem_block *
|
||||
drmmmInit(int ofs, int size)
|
||||
{
|
||||
struct mem_block *heap, *block;
|
||||
|
||||
if (size <= 0)
|
||||
return NULL;
|
||||
|
||||
heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
|
||||
if (!heap)
|
||||
return NULL;
|
||||
|
||||
block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
|
||||
if (!block) {
|
||||
free(heap);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
heap->next = block;
|
||||
heap->prev = block;
|
||||
heap->next_free = block;
|
||||
heap->prev_free = block;
|
||||
|
||||
block->heap = heap;
|
||||
block->next = heap;
|
||||
block->prev = heap;
|
||||
block->next_free = heap;
|
||||
block->prev_free = heap;
|
||||
|
||||
block->ofs = ofs;
|
||||
block->size = size;
|
||||
block->free = 1;
|
||||
|
||||
return heap;
|
||||
}
|
||||
|
||||
|
||||
static struct mem_block *
|
||||
SliceBlock(struct mem_block *p,
|
||||
int startofs, int size,
|
||||
int reserved, int alignment)
|
||||
{
|
||||
struct mem_block *newblock;
|
||||
|
||||
/* break left [p, newblock, p->next], then p = newblock */
|
||||
if (startofs > p->ofs) {
|
||||
newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
|
||||
if (!newblock)
|
||||
return NULL;
|
||||
newblock->ofs = startofs;
|
||||
newblock->size = p->size - (startofs - p->ofs);
|
||||
newblock->free = 1;
|
||||
newblock->heap = p->heap;
|
||||
|
||||
newblock->next = p->next;
|
||||
newblock->prev = p;
|
||||
p->next->prev = newblock;
|
||||
p->next = newblock;
|
||||
|
||||
newblock->next_free = p->next_free;
|
||||
newblock->prev_free = p;
|
||||
p->next_free->prev_free = newblock;
|
||||
p->next_free = newblock;
|
||||
|
||||
p->size -= newblock->size;
|
||||
p = newblock;
|
||||
}
|
||||
|
||||
/* break right, also [p, newblock, p->next] */
|
||||
if (size < p->size) {
|
||||
newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
|
||||
if (!newblock)
|
||||
return NULL;
|
||||
newblock->ofs = startofs + size;
|
||||
newblock->size = p->size - size;
|
||||
newblock->free = 1;
|
||||
newblock->heap = p->heap;
|
||||
|
||||
newblock->next = p->next;
|
||||
newblock->prev = p;
|
||||
p->next->prev = newblock;
|
||||
p->next = newblock;
|
||||
|
||||
newblock->next_free = p->next_free;
|
||||
newblock->prev_free = p;
|
||||
p->next_free->prev_free = newblock;
|
||||
p->next_free = newblock;
|
||||
|
||||
p->size = size;
|
||||
}
|
||||
|
||||
/* p = middle block */
|
||||
p->free = 0;
|
||||
|
||||
/* Remove p from the free list:
|
||||
*/
|
||||
p->next_free->prev_free = p->prev_free;
|
||||
p->prev_free->next_free = p->next_free;
|
||||
|
||||
p->next_free = 0;
|
||||
p->prev_free = 0;
|
||||
|
||||
p->reserved = reserved;
|
||||
return p;
|
||||
}
|
||||
|
||||
|
||||
struct mem_block *
|
||||
drmmmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
|
||||
{
|
||||
struct mem_block *p;
|
||||
const int mask = (1 << align2)-1;
|
||||
int startofs = 0;
|
||||
int endofs;
|
||||
|
||||
if (!heap || align2 < 0 || size <= 0)
|
||||
return NULL;
|
||||
|
||||
for (p = heap->next_free; p != heap; p = p->next_free) {
|
||||
assert(p->free);
|
||||
|
||||
startofs = (p->ofs + mask) & ~mask;
|
||||
if ( startofs < startSearch ) {
|
||||
startofs = startSearch;
|
||||
}
|
||||
endofs = startofs+size;
|
||||
if (endofs <= (p->ofs+p->size))
|
||||
break;
|
||||
}
|
||||
|
||||
if (p == heap)
|
||||
return NULL;
|
||||
|
||||
assert(p->free);
|
||||
p = SliceBlock(p,startofs,size,0,mask+1);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
|
||||
struct mem_block *
|
||||
drmmmFindBlock(struct mem_block *heap, int start)
|
||||
{
|
||||
struct mem_block *p;
|
||||
|
||||
for (p = heap->next; p != heap; p = p->next) {
|
||||
if (p->ofs == start)
|
||||
return p;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
Join2Blocks(struct mem_block *p)
|
||||
{
|
||||
/* XXX there should be some assertions here */
|
||||
|
||||
/* NOTE: heap->free == 0 */
|
||||
|
||||
if (p->free && p->next->free) {
|
||||
struct mem_block *q = p->next;
|
||||
|
||||
assert(p->ofs + p->size == q->ofs);
|
||||
p->size += q->size;
|
||||
|
||||
p->next = q->next;
|
||||
q->next->prev = p;
|
||||
|
||||
q->next_free->prev_free = q->prev_free;
|
||||
q->prev_free->next_free = q->next_free;
|
||||
|
||||
free(q);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
drmmmFreeMem(struct mem_block *b)
|
||||
{
|
||||
if (!b)
|
||||
return 0;
|
||||
|
||||
if (b->free) {
|
||||
drmMsg("block already free\n");
|
||||
return -1;
|
||||
}
|
||||
if (b->reserved) {
|
||||
drmMsg("block is reserved\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
b->free = 1;
|
||||
b->next_free = b->heap->next_free;
|
||||
b->prev_free = b->heap;
|
||||
b->next_free->prev_free = b;
|
||||
b->prev_free->next_free = b;
|
||||
|
||||
Join2Blocks(b);
|
||||
if (b->prev != b->heap)
|
||||
Join2Blocks(b->prev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
drmmmDestroy(struct mem_block *heap)
|
||||
{
|
||||
struct mem_block *p;
|
||||
|
||||
if (!heap)
|
||||
return;
|
||||
|
||||
for (p = heap->next; p != heap; ) {
|
||||
struct mem_block *next = p->next;
|
||||
free(p);
|
||||
p = next;
|
||||
}
|
||||
|
||||
free(heap);
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* GLX Hardware Device Driver common code
|
||||
* Copyright (C) 1999 Wittawat Yamwong
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
|
||||
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* Memory manager code. Primarily used by device drivers to manage texture
|
||||
* heaps, etc.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef MM_H
|
||||
#define MM_H
|
||||
|
||||
struct mem_block {
|
||||
struct mem_block *next, *prev;
|
||||
struct mem_block *next_free, *prev_free;
|
||||
struct mem_block *heap;
|
||||
int ofs,size;
|
||||
unsigned int free:1;
|
||||
unsigned int reserved:1;
|
||||
};
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* input: total size in bytes
|
||||
* return: a heap pointer if OK, NULL if error
|
||||
*/
|
||||
extern struct mem_block *drmmmInit(int ofs, int size);
|
||||
|
||||
/**
|
||||
* Allocate 'size' bytes with 2^align2 bytes alignment,
|
||||
* restrict the search to free memory after 'startSearch'
|
||||
* depth and back buffers should be in different 4mb banks
|
||||
* to get better page hits if possible
|
||||
* input: size = size of block
|
||||
* align2 = 2^align2 bytes alignment
|
||||
* startSearch = linear offset from start of heap to begin search
|
||||
* return: pointer to the allocated block, 0 if error
|
||||
*/
|
||||
extern struct mem_block *drmmmAllocMem(struct mem_block *heap, int size,
|
||||
int align2, int startSearch);
|
||||
|
||||
/**
|
||||
* Free block starts at offset
|
||||
* input: pointer to a block
|
||||
* return: 0 if OK, -1 if error
|
||||
*/
|
||||
extern int drmmmFreeMem(struct mem_block *b);
|
||||
|
||||
/**
|
||||
* Free block starts at offset
|
||||
* input: pointer to a heap, start offset
|
||||
* return: pointer to a block
|
||||
*/
|
||||
extern struct mem_block *drmmmFindBlock(struct mem_block *heap, int start);
|
||||
|
||||
/**
|
||||
* destroy MM
|
||||
*/
|
||||
extern void drmmmDestroy(struct mem_block *mmInit);
|
||||
|
||||
/**
|
||||
* For debuging purpose.
|
||||
*/
|
||||
extern void drmmmDumpMemInfo(const struct mem_block *mmInit);
|
||||
|
||||
#endif
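Since the allocator keeps its old mm.c behaviour under the drmmm prefix, a small standalone sketch of the renamed entry points declared above; the heap offset, sizes and alignment are made up for the example:

    #include "mm.h"

    /* Carve a 64 KB heap starting at offset 0 and grab one
     * 4096-byte-aligned block from it. */
    int example(void)
    {
        struct mem_block *heap = drmmmInit(0, 64 * 1024);
        struct mem_block *block;

        if (heap == NULL)
            return -1;

        /* align2 is log2 of the alignment: 12 -> 4096-byte alignment. */
        block = drmmmAllocMem(heap, 4096, 12, 0);
        if (block != NULL) {
            /* block->ofs is the allocation's offset inside the heap. */
            drmmmFreeMem(block);
        }

        drmmmDestroy(heap);
        return 0;
    }
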
@@ -110,7 +110,7 @@ static int drmDebugPrint(const char *format, va_list ap)

static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint;

static void
void
drmMsg(const char *format, ...)
{
va_list ap;

@@ -657,6 +657,7 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,

extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
extern void drmMsg(const char *format, ...);

#include "xf86mm.h"

@@ -94,6 +94,18 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))

#define DRMLISTEMPTY(__item) ((__item)->next == (__item))

#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
for ((__item) = (__list)->next, (__temp) = (__item)->next; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->next)

#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->prev)

typedef struct _drmFence
{
unsigned handle;
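The new DRMLISTEMPTY/DRMLISTFOREACHSAFE macros are what let the fake bufmgr drop Mesa's simple_list helpers. A small illustrative use, assuming the pre-existing DRMLISTENTRY/DRMLISTDEL macros used elsewhere in this diff; the job type and function are invented for the example:

    #include <stdlib.h>
    #include "xf86mm.h"

    /* Hypothetical node type; DRMLISTENTRY recovers it from the embedded head. */
    struct job {
        drmMMListHead link;
        int id;
    };

    static void drain_jobs(drmMMListHead *queue)
    {
        drmMMListHead *entry, *tmp;

        /* Safe to unlink entries while walking, which is exactly what the
         * fake bufmgr does with its lru/fenced/on_hardware lists above. */
        DRMLISTFOREACHSAFE(entry, tmp, queue) {
            struct job *j = DRMLISTENTRY(struct job, entry, link);
            DRMLISTDEL(entry);
            free(j);
        }
    }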