Merge branch 'drm-gem'

Conflicts:

	shared-core/i915_dma.c

This brings in kernel support and the userland interface for Intel GEM.
Eric Anholt 2008-08-08 14:05:01 -07:00
commit e1b8e79796
46 changed files with 9472 additions and 274 deletions
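
Before the file-by-file diff, here is a minimal sketch (not part of the commit) of how a client might drive the new userland interface, using only functions this merge adds. The device path and the 16384-byte batch size are illustrative assumptions, and error handling is elided:

#include <fcntl.h>
#include "dri_bufmgr.h"
#include "intel_bufmgr.h"

int main(void)
{
    dri_bufmgr *bufmgr;
    dri_bo *bo;
    static const char data[] = "hello";
    /* Assumed device node; any fd for an i915 DRM device will do. */
    int fd = open("/dev/dri/card0", O_RDWR);

    /* The batch size only scales the per-buffer relocation array. */
    bufmgr = intel_bufmgr_gem_init(fd, 16384);
    intel_bufmgr_gem_enable_reuse(bufmgr);

    /* Page-sized, page-aligned BO; the upload takes the pwrite path. */
    bo = dri_bo_alloc(bufmgr, "scratch", 4096, 4096);
    dri_bo_subdata(bo, 0, sizeof(data), data);

    dri_bo_unreference(bo);
    dri_bufmgr_destroy(bufmgr);
    return 0;
}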

.gitignore

@@ -58,6 +58,9 @@ tests/getclient
tests/getstats
tests/getversion
tests/lock
tests/gem_basic
tests/gem_mmap
tests/gem_readwrite
tests/openclose
tests/setversion
tests/updatedraw

configure.ac

@@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libdrm], 2.3.1, [dri-devel@lists.sourceforge.net], libdrm)
AC_INIT([libdrm], 2.4.0, [dri-devel@lists.sourceforge.net], libdrm)
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
@@ -35,9 +35,77 @@ AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
dnl ===========================================================================
dnl check compiler flags
AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
AC_MSG_CHECKING([whether $CC supports $1])
libdrm_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $1"
AC_COMPILE_IFELSE([ ], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
CFLAGS="$libdrm_save_CFLAGS"
if test "x$libdrm_cc_flag" = "xyes"; then
ifelse([$2], , :, [$2])
else
ifelse([$3], , :, [$3])
fi
AC_MSG_RESULT([$libdrm_cc_flag])
])
dnl Use lots of warning flags with gcc and compatible compilers
dnl Note: if you change the following variable, the cache is automatically
dnl skipped and all flags rechecked. So there's no need to do anything
dnl else. If for any reason you need to force a recheck, just change
dnl MAYBE_WARN in an ignorable way (like adding whitespace)
MAYBE_WARN="-Wall -Wextra \
-Wsign-compare -Werror-implicit-function-declaration \
-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
-Wpacked -Wswitch-enum -Wmissing-format-attribute \
-Wstrict-aliasing=2 -Winit-self -Wunsafe-loop-optimizations \
-Wdeclaration-after-statement -Wold-style-definition \
-Wno-missing-field-initializers -Wno-unused-parameter \
-Wno-attributes -Wno-long-long -Winline"
# invalidate cached value if MAYBE_WARN has changed
if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
unset libdrm_cv_warn_cflags
fi
AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
echo
WARN_CFLAGS=""
# Some warning options are not supported by all versions of
# gcc, so test all desired options against the current
# compiler.
#
# Note that there are some order dependencies
# here. Specifically, an option that disables a warning will
# have no net effect if a later option then enables that
# warning (perhaps implicitly). So we put some grouped
# options (-Wall and -Wextra) up front and the -Wno options
# last.
for W in $MAYBE_WARN; do
LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
done
libdrm_cv_warn_cflags=$WARN_CFLAGS
libdrm_cv_warn_maybe=$MAYBE_WARN
AC_MSG_CHECKING([which warning flags were supported])])
WARN_CFLAGS="$libdrm_cv_warn_cflags"
AC_SUBST(WARN_CFLAGS)
AC_OUTPUT([
Makefile
libdrm/Makefile
libdrm/intel/Makefile
shared-core/Makefile
tests/Makefile
libdrm.pc])

libdrm/Makefile.am

@@ -18,14 +18,18 @@
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
SUBDIRS = intel
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
AM_CFLAGS = -I$(top_srcdir)/shared-core
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
dri_bufmgr.c
libdrm_la_LIBADD = intel/libdrm_intel.la
libdrmincludedir = ${includedir}
libdrminclude_HEADERS = xf86drm.h xf86mm.h
libdrminclude_HEADERS = xf86drm.h xf86mm.h dri_bufmgr.h
EXTRA_DIST = ChangeLog TODO

libdrm/dri_bufmgr.c (new file)

@@ -0,0 +1,141 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include "dri_bufmgr.h"
/** @file dri_bufmgr.c
*
* Convenience functions for buffer management methods.
*/
dri_bo *
dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
void
dri_bo_reference(dri_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
void
dri_bo_unreference(dri_bo *bo)
{
if (bo == NULL)
return;
bo->bufmgr->bo_unreference(bo);
}
int
dri_bo_map(dri_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
int
dri_bo_unmap(dri_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
int
dri_bo_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
int ret;
if (bo->bufmgr->bo_subdata)
return bo->bufmgr->bo_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
ret = dri_bo_map(bo, 1);
if (ret)
return ret;
memcpy((unsigned char *)bo->virtual + offset, data, size);
dri_bo_unmap(bo);
return 0;
}
int
dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
int ret;
if (bo->bufmgr->bo_get_subdata)
return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
ret = dri_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
dri_bo_unmap(bo);
return 0;
}
void
dri_bo_wait_rendering(dri_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
void
dri_bufmgr_destroy(dri_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
void *dri_process_relocs(dri_bo *batch_buf)
{
return batch_buf->bufmgr->process_relocs(batch_buf);
}
void dri_post_submit(dri_bo *batch_buf)
{
batch_buf->bufmgr->post_submit(batch_buf);
}
void
dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
int
dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
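
The map/memcpy/unmap fallback above means a caller sees the same behavior whether or not the backend provides a bo_subdata hook; a small sketch, with a hypothetical upload() helper:

#include <string.h>
#include "dri_bufmgr.h"

/* Hypothetical helper showing the two equivalent write paths. */
static void
upload(dri_bo *bo, const void *src, unsigned long len, int use_subdata)
{
    if (use_subdata) {
        /* The backend may satisfy this with a pwrite-style hook. */
        dri_bo_subdata(bo, 0, len, src);
    } else {
        /* What the generic fallback above does when the hook is absent. */
        if (dri_bo_map(bo, 1) == 0) {      /* 1 = write_enable */
            memcpy((unsigned char *)bo->virtual, src, len);
            dri_bo_unmap(bo);
        }
    }
}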

libdrm/dri_bufmgr.h (new file)

@@ -0,0 +1,174 @@
/**************************************************************************
*
* Copyright © 2007 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
*/
#ifndef _DRI_BUFMGR_H_
#define _DRI_BUFMGR_H_
#include <xf86drm.h>
typedef struct _dri_bufmgr dri_bufmgr;
typedef struct _dri_bo dri_bo;
struct _dri_bo {
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
/**
* Card virtual address (offset from the beginning of the aperture) for the
* object. Only valid while validated.
*/
unsigned long offset;
/**
* Virtual address for accessing the buffer data. Only valid while mapped.
*/
void *virtual;
/** Buffer manager context associated with this buffer object */
dri_bufmgr *bufmgr;
};
/**
* Context for a buffer manager instance.
*
* Contains public methods followed by private storage for the buffer manager.
*/
struct _dri_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped using
* bo_map() to be used by the CPU, and validated for use using bo_validate()
* to be used from the graphics device.
*/
dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
unsigned long size, unsigned int alignment);
/** Takes a reference on a buffer object */
void (*bo_reference)(dri_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if no
* references remain.
*/
void (*bo_unreference)(dri_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map)(dri_bo *buf, int write_enable);
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(dri_bo *buf);
/**
* Write data into an object.
*
* This is an optional function; if missing, the generic dri_bo code
* will fall back to map/memcpy/unmap.
*/
int (*bo_subdata) (dri_bo *buf, unsigned long offset,
unsigned long size, const void *data);
/**
* Read data from an object
*
* This is an optional function; if missing, the generic dri_bo code
* will fall back to map/memcpy/unmap.
*/
int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map, bo_subdata, etc.
* It is merely a way for the driver to implement glFinish.
*/
void (*bo_wait_rendering) (dri_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void (*destroy)(dri_bufmgr *bufmgr);
/**
* Processes the relocations, either in userland or by converting the list
* for use in batchbuffer submission.
*
* Kernel-based implementations will return a pointer to the arguments
* to be handed with batchbuffer submission to the kernel. The userland
* implementation performs the buffer validation and emits relocations
* into the buffers in the appropriate order.
*
* \param batch_buf buffer at the root of the tree of relocations
* \return argument to be completed and passed to the execbuffers ioctl
* (if any).
*/
void *(*process_relocs)(dri_bo *batch_buf);
void (*post_submit)(dri_bo *batch_buf);
int (*check_aperture_space)(dri_bo **bo_array, int count);
int debug; /**< Enables verbose debugging printouts */
};
dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
unsigned int alignment);
void dri_bo_reference(dri_bo *bo);
void dri_bo_unreference(dri_bo *bo);
int dri_bo_map(dri_bo *buf, int write_enable);
int dri_bo_unmap(dri_bo *buf);
int dri_bo_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, const void *data);
int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, void *data);
void dri_bo_wait_rendering(dri_bo *bo);
void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
void *dri_process_relocs(dri_bo *batch_buf);
void dri_post_process_relocs(dri_bo *batch_buf);
void dri_post_submit(dri_bo *batch_buf);
int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count);
#endif
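
The header is laid out so that a backend embeds struct _dri_bufmgr as its first member and fills in the method pointers, while callers only ever hold the dri_bufmgr pointer. A compressed, hypothetical sketch of that shape (the GEM backend later in this diff follows the same pattern):

#include <stdlib.h>
#include "dri_bufmgr.h"

typedef struct {
    dri_bufmgr bufmgr;  /* must be first so a dri_bufmgr * casts back */
    int fd;             /* backend-private state follows */
} my_bufmgr;

static dri_bo *
my_bo_alloc(dri_bufmgr *bufmgr_ctx, const char *name,
            unsigned long size, unsigned int alignment)
{
    my_bufmgr *my = (my_bufmgr *)bufmgr_ctx;  /* downcast via first member */
    (void)my; (void)name; (void)size; (void)alignment;
    return NULL;  /* a real backend returns a struct embedding dri_bo first */
}

dri_bufmgr *
my_bufmgr_init(int fd)
{
    my_bufmgr *my = calloc(1, sizeof(*my));
    my->fd = fd;
    my->bufmgr.bo_alloc = my_bo_alloc;
    /* ... remaining methods ... */
    return &my->bufmgr;
}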

libdrm/intel/Makefile.am (new file)

@@ -0,0 +1,38 @@
# Copyright © 2008 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Eric Anholt <eric@anholt.net>
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir)/shared-core
noinst_LTLIBRARIES = libdrm_intel.la
libdrm_intel_la_SOURCES = \
intel_bufmgr_fake.c \
intel_bufmgr_gem.c \
mm.c \
mm.h
libdrm_intelincludedir = ${includedir}
libdrm_intelinclude_HEADERS = intel_bufmgr.h

libdrm/intel/intel_bufmgr.h (new file)

@@ -0,0 +1,130 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/**
* @file intel_bufmgr.h
*
* Public definitions of Intel-specific bufmgr functions.
*/
#ifndef INTEL_BUFMGR_GEM_H
#define INTEL_BUFMGR_GEM_H
#include "dri_bufmgr.h"
/**
* Intel-specific bufmgr bits that follow immediately after the
* generic bufmgr structure.
*/
struct intel_bufmgr {
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param reloc_buf Buffer to write the relocation into.
* \param read_domains GEM read domains which the buffer will be read into
* by the command that this relocation is part of.
* \param write_domain GEM write domain which the buffer will be dirtied
* in by the command that this relocation is part of.
* \param delta Constant value to be added to the relocation target's
* offset.
* \param offset Byte offset within batch_buf of the relocated pointer.
* \param target Buffer whose offset should be written into the relocation
* entry.
*/
int (*emit_reloc)(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target);
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
int (*pin) (dri_bo *buf, uint32_t alignment);
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
int (*unpin) (dri_bo *buf);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int (*set_tiling) (dri_bo *bo, uint32_t *tiling_mode);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*flink) (dri_bo *buf, uint32_t *name);
};
/* intel_bufmgr_gem.c */
dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
unsigned int handle);
void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
/* intel_bufmgr_fake.c */
dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
unsigned long size,
unsigned int (*fence_emit)(void *private),
int (*fence_wait)(void *private,
unsigned int cookie),
void *driver_priv);
dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
unsigned long offset, unsigned long size,
void *virtual);
void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
void intel_bo_fake_disable_backing_store(dri_bo *bo,
void (*invalidate_cb)(dri_bo *bo,
void *ptr),
void *ptr);
void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
int intel_bo_emit_reloc(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_buf);
int intel_bo_pin(dri_bo *buf, uint32_t alignment);
int intel_bo_unpin(dri_bo *buf);
int intel_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode);
int intel_bo_flink(dri_bo *buf, uint32_t *name);
#endif /* INTEL_BUFMGR_GEM_H */
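
One implicit contract worth noting: the intel_bo_* wrappers in intel_bufmgr_gem.c locate this struct via (struct intel_bufmgr *)(bo->bufmgr + 1), so the Intel vtable must sit immediately after the generic dri_bufmgr in the backend's layout, as dri_bufmgr_gem below arranges. A condensed sketch of that dispatch (example_backend and example_pin are hypothetical names):

#include <stdint.h>
#include "intel_bufmgr.h"

typedef struct {
    dri_bufmgr bufmgr;          /* generic vtable, first */
    struct intel_bufmgr intel;  /* reachable as (dri_bufmgr *)x + 1 */
    /* backend-private fields follow */
} example_backend;

int example_pin(dri_bo *bo, uint32_t alignment)
{
    struct intel_bufmgr *ib = (struct intel_bufmgr *)(bo->bufmgr + 1);
    return ib->pin ? ib->pin(bo, alignment) : 0;
}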

libdrm/intel/intel_bufmgr_fake.c (new file; diff suppressed because it is too large)

libdrm/intel/intel_bufmgr_gem.c (new file)

@@ -0,0 +1,992 @@
/**************************************************************************
*
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
*/
#include <xf86drm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "errno.h"
#include "dri_bufmgr.h"
#include "intel_bufmgr.h"
#include "string.h"
#include "i915_drm.h"
#define DBG(...) do { \
if (bufmgr_gem->bufmgr.debug) \
fprintf(stderr, __VA_ARGS__); \
} while (0)
typedef struct _dri_bo_gem dri_bo_gem;
struct dri_gem_bo_bucket {
dri_bo_gem *head, **tail;
/**
* Limit on the number of entries in this bucket.
*
* 0 means that caching at this bucket size is disabled.
* -1 means that there is no limit to caching at this size.
*/
int max_entries;
int num_entries;
};
/* Arbitrarily chosen: with 16 buckets the maximum size we'll cache for
* reuse is 1 << 15 pages, or 128MB.
*/
#define INTEL_GEM_BO_BUCKETS 16
typedef struct _dri_bufmgr_gem {
dri_bufmgr bufmgr;
struct intel_bufmgr intel_bufmgr;
int fd;
int max_relocs;
struct drm_i915_gem_exec_object *exec_objects;
dri_bo **exec_bos;
int exec_size;
int exec_count;
/** Array of lists of cached gem objects of power-of-two sizes */
struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
struct drm_i915_gem_execbuffer exec_arg;
} dri_bufmgr_gem;
struct _dri_bo_gem {
dri_bo bo;
int refcount;
/** Boolean whether the mmap ioctl has been called for this buffer yet. */
int mapped;
uint32_t gem_handle;
const char *name;
/**
* Kernel-assigned global name for this object
*/
unsigned int global_name;
/**
* Index of the buffer within the validation list while preparing a
* batchbuffer execution.
*/
int validate_index;
/**
* Boolean whether we've started swrast
* Set when the buffer has been mapped
* Cleared when the buffer is unmapped
*/
int swrast;
/** Array passed to the DRM containing relocation information. */
struct drm_i915_gem_relocation_entry *relocs;
/** Array of bos corresponding to relocs[i].target_handle */
dri_bo **reloc_target_bo;
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer */
void *virtual;
/** free list */
dri_bo_gem *next;
};
static int
logbase2(int n)
{
int i = 1;
int log2 = 0;
while (n > i) {
i *= 2;
log2++;
}
return log2;
}
static struct dri_gem_bo_bucket *
dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
{
int i;
/* We only do buckets in power of two increments */
if ((size & (size - 1)) != 0)
return NULL;
/* We should only see sizes rounded to pages. */
assert((size % 4096) == 0);
/* We always allocate in units of pages */
i = ffs(size / 4096) - 1;
if (i >= INTEL_GEM_BO_BUCKETS)
return NULL;
return &bufmgr_gem->cache_bucket[i];
}
static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
{
int i, j;
for (i = 0; i < bufmgr_gem->exec_count; i++) {
dri_bo *bo = bufmgr_gem->exec_bos[i];
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
if (bo_gem->relocs == NULL) {
DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
continue;
}
for (j = 0; j < bo_gem->reloc_count; j++) {
dri_bo *target_bo = bo_gem->reloc_target_bo[j];
dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
i,
bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
target_gem->gem_handle, target_gem->name, target_bo->offset,
bo_gem->relocs[j].delta);
}
}
}
/**
* Adds the given buffer to the list of buffers to be validated (moved into the
* appropriate memory type) with the next batch submission.
*
* If a buffer is validated multiple times in a batch submission, it ends up
* with the intersection of the memory type flags and the union of the
* access flags.
*/
static void
intel_add_validate_buffer(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
int index;
if (bo_gem->validate_index != -1)
return;
/* Extend the array of validation entries as necessary. */
if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
int new_size = bufmgr_gem->exec_size * 2;
if (new_size == 0)
new_size = 5;
bufmgr_gem->exec_objects =
realloc(bufmgr_gem->exec_objects,
sizeof(*bufmgr_gem->exec_objects) * new_size);
bufmgr_gem->exec_bos =
realloc(bufmgr_gem->exec_bos,
sizeof(*bufmgr_gem->exec_bos) * new_size);
bufmgr_gem->exec_size = new_size;
}
index = bufmgr_gem->exec_count;
bo_gem->validate_index = index;
/* Fill in array entry */
bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
bufmgr_gem->exec_objects[index].alignment = 0;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
dri_bo_reference(bo);
bufmgr_gem->exec_count++;
}
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
sizeof(uint32_t))
static int
intel_setup_reloc_list(dri_bo *bo)
{
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
sizeof(struct drm_i915_gem_relocation_entry));
bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
return 0;
}
static dri_bo *
dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
dri_bo_gem *bo_gem;
unsigned int page_size = getpagesize();
int ret;
struct dri_gem_bo_bucket *bucket;
int alloc_from_cache = 0;
unsigned long bo_size;
/* Round the allocated size up to a power of two number of pages. */
bo_size = 1 << logbase2(size);
if (bo_size < page_size)
bo_size = page_size;
bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
/* If we don't have caching at this size, don't actually round the
* allocation up.
*/
if (bucket == NULL || bucket->max_entries == 0) {
bo_size = size;
if (bo_size < page_size)
bo_size = page_size;
}
/* Get a buffer out of the cache if available */
if (bucket != NULL && bucket->num_entries > 0) {
struct drm_i915_gem_busy busy;
bo_gem = bucket->head;
busy.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
alloc_from_cache = (ret == 0 && busy.busy == 0);
if (alloc_from_cache) {
bucket->head = bo_gem->next;
if (bo_gem->next == NULL)
bucket->tail = &bucket->head;
bucket->num_entries--;
}
}
if (!alloc_from_cache) {
struct drm_i915_gem_create create;
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
bo_gem->bo.size = bo_size;
memset(&create, 0, sizeof(create));
create.size = bo_size;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
bo_gem->gem_handle = create.handle;
if (ret != 0) {
free(bo_gem);
return NULL;
}
bo_gem->bo.bufmgr = bufmgr;
}
bo_gem->name = name;
bo_gem->refcount = 1;
bo_gem->validate_index = -1;
DBG("bo_create: buf %d (%s) %ldb\n",
bo_gem->gem_handle, bo_gem->name, size);
return &bo_gem->bo;
}
/**
* Returns a dri_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
dri_bo *
intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
unsigned int handle)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
dri_bo_gem *bo_gem;
int ret;
struct drm_gem_open open_arg;
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
memset(&open_arg, 0, sizeof(open_arg));
open_arg.name = handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (ret != 0) {
fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
free(bo_gem);
return NULL;
}
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
bo_gem->refcount = 1;
bo_gem->validate_index = -1;
bo_gem->gem_handle = open_arg.handle;
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
return &bo_gem->bo;
}
static void
dri_gem_bo_reference(dri_bo *bo)
{
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
bo_gem->refcount++;
}
static void
dri_gem_bo_free(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_gem_close close;
int ret;
if (bo_gem->mapped)
munmap (bo_gem->virtual, bo_gem->bo.size);
/* Close this object */
close.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
fprintf(stderr,
"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
free(bo);
}
static void
dri_gem_bo_unreference(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
if (!bo)
return;
bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
if (--bo_gem->refcount == 0) {
struct dri_gem_bo_bucket *bucket;
if (bo_gem->relocs != NULL) {
int i;
/* Unreference all the target buffers */
for (i = 0; i < bo_gem->reloc_count; i++)
dri_bo_unreference(bo_gem->reloc_target_bo[i]);
free(bo_gem->reloc_target_bo);
free(bo_gem->relocs);
}
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bucket != NULL &&
(bucket->max_entries == -1 ||
(bucket->max_entries > 0 &&
bucket->num_entries < bucket->max_entries)))
{
bo_gem->name = 0;
bo_gem->validate_index = -1;
bo_gem->relocs = NULL;
bo_gem->reloc_target_bo = NULL;
bo_gem->reloc_count = 0;
bo_gem->next = NULL;
*bucket->tail = bo_gem;
bucket->tail = &bo_gem->next;
bucket->num_entries++;
} else {
dri_gem_bo_free(bo);
}
return;
}
}
static int
dri_gem_bo_map(dri_bo *bo, int write_enable)
{
dri_bufmgr_gem *bufmgr_gem;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
if (!bo_gem->mapped) {
assert(bo->virtual == NULL);
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
if (bo_gem->virtual == NULL) {
struct drm_i915_gem_mmap mmap_arg;
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
mmap_arg.size = bo->size;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
}
bo->virtual = bo_gem->virtual;
bo_gem->swrast = 0;
bo_gem->mapped = 1;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
}
if (!bo_gem->swrast) {
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
if (write_enable)
set_domain.write_domain = I915_GEM_DOMAIN_CPU;
else
set_domain.write_domain = 0;
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
}
bo_gem->swrast = 1;
}
return 0;
}
static int
dri_gem_bo_unmap(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_sw_finish sw_finish;
int ret;
if (bo == NULL)
return 0;
bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
assert(bo_gem->mapped);
if (bo_gem->swrast) {
sw_finish.handle = bo_gem->gem_handle;
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
&sw_finish);
} while (ret == -1 && errno == EINTR);
bo_gem->swrast = 0;
}
return 0;
}
static int
dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_pwrite pwrite;
int ret;
memset (&pwrite, 0, sizeof (pwrite));
pwrite.handle = bo_gem->gem_handle;
pwrite.offset = offset;
pwrite.size = size;
pwrite.data_ptr = (uint64_t) (uintptr_t) data;
do {
ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, (int) offset, (int) size,
strerror (errno));
}
return 0;
}
static int
dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_pread pread;
int ret;
memset (&pread, 0, sizeof (pread));
pread.handle = bo_gem->gem_handle;
pread.offset = offset;
pread.size = size;
pread.data_ptr = (uint64_t) (uintptr_t) data;
do {
ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, (int) offset, (int) size,
strerror (errno));
}
return 0;
}
static void
dri_gem_bo_wait_rendering(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = 0;
ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
strerror (errno));
}
}
static void
dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
int i;
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
/* Free any cached buffer objects we were going to reuse */
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
dri_bo_gem *bo_gem;
while ((bo_gem = bucket->head) != NULL) {
bucket->head = bo_gem->next;
if (bo_gem->next == NULL)
bucket->tail = &bucket->head;
bucket->num_entries--;
dri_gem_bo_free(&bo_gem->bo);
}
}
free(bufmgr);
}
/**
* Adds the target buffer to the validation list and adds the relocation
* to the reloc_buffer's relocation list.
*
* The relocation entry at the given offset must already contain the
* precomputed relocation value, because the kernel will optimize out
* the relocation entry write when the buffer hasn't moved from the
* last known offset in target_bo.
*/
static int
dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL)
intel_setup_reloc_list(bo);
/* Check overflow */
assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
/* Check args */
assert (offset <= bo->size - 4);
assert ((write_domain & (write_domain-1)) == 0);
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
bo_gem->relocs[bo_gem->reloc_count].delta = delta;
bo_gem->relocs[bo_gem->reloc_count].target_handle =
target_bo_gem->gem_handle;
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
dri_bo_reference(target_bo);
bo_gem->reloc_count++;
return 0;
}
/**
* Walk the tree of relocations rooted at BO and accumulate the list of
* validations to be performed and update the relocation buffers with
* index values into the validation list.
*/
static void
dri_gem_bo_process_reloc(dri_bo *bo)
{
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
int i;
if (bo_gem->relocs == NULL)
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
dri_bo *target_bo = bo_gem->reloc_target_bo[i];
/* Continue walking the tree depth-first. */
dri_gem_bo_process_reloc(target_bo);
/* Add the target to the validate list */
intel_add_validate_buffer(target_bo);
}
}
static void *
dri_gem_process_reloc(dri_bo *batch_buf)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
/* Update indices and set up the validate list. */
dri_gem_bo_process_reloc(batch_buf);
/* Add the batch buffer to the validation list. There are no relocations
* pointing to it.
*/
intel_add_validate_buffer(batch_buf);
bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
bufmgr_gem->exec_arg.batch_start_offset = 0;
bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */
return &bufmgr_gem->exec_arg;
}
static void
intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
{
int i;
for (i = 0; i < bufmgr_gem->exec_count; i++) {
dri_bo *bo = bufmgr_gem->exec_bos[i];
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
/* Update the buffer offset */
if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
bufmgr_gem->exec_objects[i].offset);
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
}
static void
dri_gem_post_submit(dri_bo *batch_buf)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
int i;
intel_update_buffer_offsets (bufmgr_gem);
if (bufmgr_gem->bufmgr.debug)
dri_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
dri_bo *bo = bufmgr_gem->exec_bos[i];
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
/* Need to call swrast on next bo_map */
bo_gem->swrast = 0;
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
dri_bo_unreference(bo);
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
}
static int
dri_gem_pin(dri_bo *bo, uint32_t alignment)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_pin pin;
int ret;
pin.handle = bo_gem->gem_handle;
pin.alignment = alignment;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
if (ret != 0)
return -errno;
bo->offset = pin.offset;
return 0;
}
static int
dri_gem_unpin(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_unpin unpin;
int ret;
unpin.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
if (ret != 0)
return -errno;
return 0;
}
static int
dri_gem_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_set_tiling set_tiling;
int ret;
set_tiling.handle = bo_gem->gem_handle;
set_tiling.tiling_mode = *tiling_mode;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
if (ret != 0) {
*tiling_mode = I915_TILING_NONE;
return -errno;
}
*tiling_mode = set_tiling.tiling_mode;
return 0;
}
static int
dri_gem_flink(dri_bo *bo, uint32_t *name)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_gem_flink flink;
int ret;
if (!bo_gem->global_name) {
flink.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
if (ret != 0)
return -errno;
bo_gem->global_name = flink.name;
}
*name = bo_gem->global_name;
return 0;
}
/**
* Enables unlimited caching of buffer objects for reuse.
*
* This is potentially very memory expensive, as the cache at each bucket
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
void
intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
int i;
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
bufmgr_gem->cache_bucket[i].max_entries = -1;
}
}
/*
* With GEM, aperture management happens in the kernel at execbuffer time,
* so the userland check is a no-op that always succeeds.
*/
static int
dri_gem_check_aperture_space(dri_bo **bo_array, int count)
{
return 0;
}
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate,
* map, and manage buffer objects.
*
* \param fd File descriptor of the opened DRM device.
*/
dri_bufmgr *
intel_bufmgr_gem_init(int fd, int batch_size)
{
dri_bufmgr_gem *bufmgr_gem;
int i;
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
bufmgr_gem->fd = fd;
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
*
* Every 4 was too few for the blender benchmark.
*/
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
bufmgr_gem->bufmgr.debug = 0;
bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
bufmgr_gem->intel_bufmgr.pin = dri_gem_pin;
bufmgr_gem->intel_bufmgr.unpin = dri_gem_unpin;
bufmgr_gem->intel_bufmgr.set_tiling = dri_gem_set_tiling;
bufmgr_gem->intel_bufmgr.flink = dri_gem_flink;
/* Initialize the linked lists for BO reuse cache. */
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
return &bufmgr_gem->bufmgr;
}
int
intel_bo_emit_reloc(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_buf)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);
return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
delta, offset, target_buf);
}
int
intel_bo_pin(dri_bo *bo, uint32_t alignment)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->pin)
return intel_bufmgr->pin(bo, alignment);
return 0;
}
int
intel_bo_unpin(dri_bo *bo)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->unpin)
return intel_bufmgr->unpin(bo);
return 0;
}
int intel_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->set_tiling)
return intel_bufmgr->set_tiling (bo, tiling_mode);
*tiling_mode = I915_TILING_NONE;
return 0;
}
int intel_bo_flink(dri_bo *bo, uint32_t *name)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->flink)
return intel_bufmgr->flink (bo, name);
return -ENODEV;
}
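
To make the cache-bucket arithmetic concrete: requests are rounded up to a power-of-two number of pages, and the bucket index is the page-count exponent, as logbase2() and dri_gem_bo_bucket_for_size() above compute. A standalone sketch of the same math:

#include <stdio.h>
#include <strings.h>  /* ffs() */

int main(void)
{
    unsigned long sizes[] = { 3000, 4096, 13000, 65536 };
    unsigned long bo_size;
    int i, bucket;

    for (i = 0; i < 4; i++) {
        /* Round up to a power-of-two number of 4096-byte pages, as
         * logbase2() plus the page-size clamp do above. */
        bo_size = 4096;
        while (bo_size < sizes[i])
            bo_size <<= 1;
        /* Bucket index is the page-count exponent, as in
         * dri_gem_bo_bucket_for_size(). */
        bucket = ffs(bo_size / 4096) - 1;
        printf("request %6lu -> bo_size %6lu, bucket %d\n",
               sizes[i], bo_size, bucket);
    }
    return 0;
}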

libdrm/intel/mm.c (new file)

@@ -0,0 +1,281 @@
/*
* GLX Hardware Device Driver common code
* Copyright (C) 1999 Wittawat Yamwong
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <stdlib.h>
#include <assert.h>
#include "xf86drm.h"
#include "mm.h"
void
mmDumpMemInfo(const struct mem_block *heap)
{
drmMsg("Memory heap %p:\n", (void *)heap);
if (heap == 0) {
drmMsg(" heap == 0\n");
} else {
const struct mem_block *p;
for(p = heap->next; p != heap; p = p->next) {
drmMsg(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
p->free ? 'F':'.',
p->reserved ? 'R':'.');
}
drmMsg("\nFree list:\n");
for(p = heap->next_free; p != heap; p = p->next_free) {
drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
p->free ? 'F':'.',
p->reserved ? 'R':'.');
}
}
drmMsg("End of memory blocks\n");
}
struct mem_block *
mmInit(int ofs, int size)
{
struct mem_block *heap, *block;
if (size <= 0)
return NULL;
heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
if (!heap)
return NULL;
block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
if (!block) {
free(heap);
return NULL;
}
heap->next = block;
heap->prev = block;
heap->next_free = block;
heap->prev_free = block;
block->heap = heap;
block->next = heap;
block->prev = heap;
block->next_free = heap;
block->prev_free = heap;
block->ofs = ofs;
block->size = size;
block->free = 1;
return heap;
}
static struct mem_block *
SliceBlock(struct mem_block *p,
int startofs, int size,
int reserved, int alignment)
{
struct mem_block *newblock;
/* break left [p, newblock, p->next], then p = newblock */
if (startofs > p->ofs) {
newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
if (!newblock)
return NULL;
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->heap = p->heap;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
newblock->next_free = p->next_free;
newblock->prev_free = p;
p->next_free->prev_free = newblock;
p->next_free = newblock;
p->size -= newblock->size;
p = newblock;
}
/* break right, also [p, newblock, p->next] */
if (size < p->size) {
newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
if (!newblock)
return NULL;
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->heap = p->heap;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
p->next = newblock;
newblock->next_free = p->next_free;
newblock->prev_free = p;
p->next_free->prev_free = newblock;
p->next_free = newblock;
p->size = size;
}
/* p = middle block */
p->free = 0;
/* Remove p from the free list:
*/
p->next_free->prev_free = p->prev_free;
p->prev_free->next_free = p->next_free;
p->next_free = 0;
p->prev_free = 0;
p->reserved = reserved;
return p;
}
struct mem_block *
mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
{
struct mem_block *p;
const int mask = (1 << align2)-1;
int startofs = 0;
int endofs;
if (!heap || align2 < 0 || size <= 0)
return NULL;
for (p = heap->next_free; p != heap; p = p->next_free) {
assert(p->free);
startofs = (p->ofs + mask) & ~mask;
if ( startofs < startSearch ) {
startofs = startSearch;
}
endofs = startofs+size;
if (endofs <= (p->ofs+p->size))
break;
}
if (p == heap)
return NULL;
assert(p->free);
p = SliceBlock(p,startofs,size,0,mask+1);
return p;
}
struct mem_block *
mmFindBlock(struct mem_block *heap, int start)
{
struct mem_block *p;
for (p = heap->next; p != heap; p = p->next) {
if (p->ofs == start)
return p;
}
return NULL;
}
static int
Join2Blocks(struct mem_block *p)
{
/* XXX there should be some assertions here */
/* NOTE: heap->free == 0 */
if (p->free && p->next->free) {
struct mem_block *q = p->next;
assert(p->ofs + p->size == q->ofs);
p->size += q->size;
p->next = q->next;
q->next->prev = p;
q->next_free->prev_free = q->prev_free;
q->prev_free->next_free = q->next_free;
free(q);
return 1;
}
return 0;
}
int
mmFreeMem(struct mem_block *b)
{
if (!b)
return 0;
if (b->free) {
drmMsg("block already free\n");
return -1;
}
if (b->reserved) {
drmMsg("block is reserved\n");
return -1;
}
b->free = 1;
b->next_free = b->heap->next_free;
b->prev_free = b->heap;
b->next_free->prev_free = b;
b->prev_free->next_free = b;
Join2Blocks(b);
if (b->prev != b->heap)
Join2Blocks(b->prev);
return 0;
}
void
mmDestroy(struct mem_block *heap)
{
struct mem_block *p;
if (!heap)
return;
for (p = heap->next; p != heap; ) {
struct mem_block *next = p->next;
free(p);
p = next;
}
free(heap);
}
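
A short sketch of the allocator's intended use (offsets, sizes, and alignment are arbitrary examples):

#include <assert.h>
#include "mm.h"

void example(void)
{
    /* Manage a 1MB range starting at offset 0. */
    struct mem_block *heap = mmInit(0, 1024 * 1024);
    /* 8KB block, aligned to 2^12 = 4096 bytes, searching from the start. */
    struct mem_block *b = mmAllocMem(heap, 8192, 12, 0);
    assert(b != NULL && (b->ofs & 4095) == 0);
    mmFreeMem(b);    /* back to the free list; neighbors are coalesced */
    mmDestroy(heap);
}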

libdrm/intel/mm.h (new file)

@@ -0,0 +1,96 @@
/*
* GLX Hardware Device Driver common code
* Copyright (C) 1999 Wittawat Yamwong
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* Memory manager code. Primarily used by device drivers to manage texture
* heaps, etc.
*/
#ifndef MM_H
#define MM_H
struct mem_block {
struct mem_block *next, *prev;
struct mem_block *next_free, *prev_free;
struct mem_block *heap;
int ofs,size;
unsigned int free:1;
unsigned int reserved:1;
};
/* Rename the variables in the drm copy of this code so that it doesn't
* conflict with mesa or whoever else has copied it around.
*/
#define mmInit drm_mmInit
#define mmAllocMem drm_mmAllocMem
#define mmFreeMem drm_mmFreeMem
#define mmFindBlock drm_mmFindBlock
#define mmDestroy drm_mmDestroy
#define mmDumpMemInfo drm_mmDumpMemInfo
/**
* input: total size in bytes
* return: a heap pointer if OK, NULL if error
*/
extern struct mem_block *mmInit(int ofs, int size);
/**
* Allocate 'size' bytes with 2^align2 bytes alignment,
* restricting the search to free memory after 'startSearch'.
* Depth and back buffers should be in different 4MB banks
* to get better page hits if possible.
* input: size = size of block
* align2 = 2^align2 bytes alignment
* startSearch = linear offset from start of heap to begin search
* return: pointer to the allocated block, 0 if error
*/
extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
int align2, int startSearch);
/**
* Free block starts at offset
* input: pointer to a block
* return: 0 if OK, -1 if error
*/
extern int mmFreeMem(struct mem_block *b);
/**
* Find the block that starts at the given offset
* input: pointer to a heap, start offset
* return: pointer to a block
*/
extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
/**
* destroy MM
*/
extern void mmDestroy(struct mem_block *mmInit);
/**
* For debugging purposes.
*/
extern void mmDumpMemInfo(const struct mem_block *mmInit);
#endif

libdrm/xf86drm.c
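
Most of this file's diff is mechanical: each raw ioctl() call becomes a call to the new drmIoctl() wrapper (added in the second hunk below), so a signal delivered during a blocking DRM call restarts it instead of surfacing EINTR to callers. The wrapper's shape, reproduced standalone:

#include <errno.h>
#include <sys/ioctl.h>

/* Same pattern as the drmIoctl() added below: retry while the call is
 * interrupted (EINTR) or asked to try again (EAGAIN). */
static int
retrying_ioctl(int fd, unsigned long request, void *arg)
{
    int ret;
    do {
        ret = ioctl(fd, request, arg);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
    return ret;
}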

@@ -110,7 +110,7 @@ static int drmDebugPrint(const char *format, va_list ap)
static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint;
static void
void
drmMsg(const char *format, ...)
{
va_list ap;
@@ -171,6 +171,19 @@ static char *drmStrdup(const char *s)
return retval;
}
/**
* Call ioctl, restarting if it is interrupted
*/
static int
drmIoctl(int fd, int request, void *arg)
{
int ret;
do {
ret = ioctl(fd, request, arg);
} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
return ret;
}
static unsigned long drmGetKeyFromFd(int fd)
{
@@ -668,7 +681,7 @@ drmVersionPtr drmGetVersion(int fd)
version->desc_len = 0;
version->desc = NULL;
if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
drmFreeKernelVersion(version);
return NULL;
}
@@ -680,7 +693,7 @@ drmVersionPtr drmGetVersion(int fd)
if (version->desc_len)
version->desc = drmMalloc(version->desc_len + 1);
if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
drmFreeKernelVersion(version);
return NULL;
@@ -766,10 +779,10 @@ char *drmGetBusid(int fd)
u.unique_len = 0;
u.unique = NULL;
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique = drmMalloc(u.unique_len + 1);
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique[u.unique_len] = '\0';
@@ -796,7 +809,7 @@ int drmSetBusid(int fd, const char *busid)
u.unique = (char *)busid;
u.unique_len = strlen(busid);
if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
return -errno;
}
return 0;
@@ -807,7 +820,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
drm_auth_t auth;
*magic = 0;
if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
return -errno;
*magic = auth.magic;
return 0;
@@ -818,7 +831,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
drm_auth_t auth;
auth.magic = magic;
if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
return -errno;
return 0;
}
@@ -883,7 +896,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
map.handle = 0;
map.type = type;
map.flags = flags;
if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
@@ -896,7 +909,7 @@ int drmRmMap(int fd, drm_handle_t handle)
map.handle = (void *)handle;
if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
return -errno;
return 0;
}
@@ -929,7 +942,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
request.flags = flags;
request.agp_start = agp_offset;
if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
return -errno;
return request.count;
}
@@ -942,7 +955,7 @@ int drmMarkBufs(int fd, double low, double high)
info.count = 0;
info.list = NULL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return -EINVAL;
if (!info.count)
@@ -951,7 +964,7 @@ int drmMarkBufs(int fd, double low, double high)
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return -ENOMEM;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
int retval = -errno;
drmFree(info.list);
return retval;
@@ -960,7 +973,7 @@ int drmMarkBufs(int fd, double low, double high)
for (i = 0; i < info.count; i++) {
info.list[i].low_mark = low * info.list[i].count;
info.list[i].high_mark = high * info.list[i].count;
if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
int retval = -errno;
drmFree(info.list);
return retval;
@@ -992,7 +1005,7 @@ int drmFreeBufs(int fd, int count, int *list)
request.count = count;
request.list = list;
if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request))
if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
return -errno;
return 0;
}
@@ -1081,14 +1094,14 @@ drmBufInfoPtr drmGetBufInfo(int fd)
info.count = 0;
info.list = NULL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return NULL;
if (info.count) {
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return NULL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
drmFree(info.list);
return NULL;
}
@@ -1132,7 +1145,7 @@ drmBufMapPtr drmMapBufs(int fd)
bufs.count = 0;
bufs.list = NULL;
bufs.virtual = NULL;
if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
return NULL;
if (!bufs.count)
@@ -1141,7 +1154,7 @@ drmBufMapPtr drmMapBufs(int fd)
if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
return NULL;
if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
drmFree(bufs.list);
return NULL;
}
@@ -1256,7 +1269,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
while (ioctl(fd, DRM_IOCTL_LOCK, &lock))
while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
;
return 0;
}
@ -1279,7 +1292,7 @@ int drmUnlock(int fd, drm_context_t context)
lock.context = context;
lock.flags = 0;
return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);
}
drm_context_t *drmGetReservedContextList(int fd, int *count)
@ -1291,7 +1304,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
res.count = 0;
res.contexts = NULL;
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
if (!res.count)
@ -1305,7 +1318,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
}
res.contexts = list;
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
for (i = 0; i < res.count; i++)
@ -1344,7 +1357,7 @@ int drmCreateContext(int fd, drm_context_t *handle)
drm_ctx_t ctx;
ctx.flags = 0; /* Modified with functions below */
if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
return -errno;
*handle = ctx.handle;
return 0;
@ -1355,7 +1368,7 @@ int drmSwitchToContext(int fd, drm_context_t context)
drm_ctx_t ctx;
ctx.handle = context;
if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
return -errno;
return 0;
}
@ -1376,7 +1389,7 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
ctx.flags |= _DRM_CONTEXT_PRESERVED;
if (flags & DRM_CONTEXT_2DONLY)
ctx.flags |= _DRM_CONTEXT_2DONLY;
if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
return -errno;
return 0;
}
@ -1387,7 +1400,7 @@ int drmGetContextFlags(int fd, drm_context_t context,
drm_ctx_t ctx;
ctx.handle = context;
if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx))
if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
return -errno;
*flags = 0;
if (ctx.flags & _DRM_CONTEXT_PRESERVED)
@ -1418,7 +1431,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
{
drm_ctx_t ctx;
ctx.handle = handle;
if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
return -errno;
return 0;
}
@ -1426,7 +1439,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
int drmCreateDrawable(int fd, drm_drawable_t *handle)
{
drm_draw_t draw;
if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
return -errno;
*handle = draw.handle;
return 0;
@ -1436,7 +1449,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
{
drm_draw_t draw;
draw.handle = handle;
if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw))
if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
return -errno;
return 0;
}
@ -1452,7 +1465,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
update.num = num;
update.data = (unsigned long long)(unsigned long)data;
if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
return -errno;
return 0;
@ -1472,7 +1485,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
*/
int drmAgpAcquire(int fd)
{
if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
return -errno;
return 0;
}
@ -1490,7 +1503,7 @@ int drmAgpAcquire(int fd)
*/
int drmAgpRelease(int fd)
{
if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
return -errno;
return 0;
}
@ -1513,7 +1526,7 @@ int drmAgpEnable(int fd, unsigned long mode)
drm_agp_mode_t m;
m.mode = mode;
if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
return -errno;
return 0;
}
@ -1544,7 +1557,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
b.size = size;
b.handle = 0;
b.type = type;
if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
return -errno;
if (address != 0UL)
*address = b.physical;
@ -1571,7 +1584,7 @@ int drmAgpFree(int fd, drm_handle_t handle)
b.size = 0;
b.handle = handle;
if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b))
if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
return -errno;
return 0;
}
@ -1596,7 +1609,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
b.handle = handle;
b.offset = offset;
if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b))
if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
return -errno;
return 0;
}
@ -1620,7 +1633,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
b.handle = handle;
b.offset = 0;
if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
return -errno;
return 0;
}
@ -1641,7 +1654,7 @@ int drmAgpVersionMajor(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_major;
}
@ -1662,7 +1675,7 @@ int drmAgpVersionMinor(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_minor;
}
@ -1683,7 +1696,7 @@ unsigned long drmAgpGetMode(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.mode;
}
@ -1704,7 +1717,7 @@ unsigned long drmAgpBase(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_base;
}
@ -1725,7 +1738,7 @@ unsigned long drmAgpSize(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_size;
}
@ -1746,7 +1759,7 @@ unsigned long drmAgpMemoryUsed(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_used;
}
@ -1767,7 +1780,7 @@ unsigned long drmAgpMemoryAvail(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_allowed;
}
@ -1788,7 +1801,7 @@ unsigned int drmAgpVendorId(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_vendor;
}
@ -1809,7 +1822,7 @@ unsigned int drmAgpDeviceId(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_device;
}
@ -1821,7 +1834,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
*handle = 0;
sg.size = size;
sg.handle = 0;
if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
return -errno;
*handle = sg.handle;
return 0;
@ -1833,7 +1846,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
sg.size = 0;
sg.handle = handle;
if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
return -errno;
return 0;
}
@ -1854,7 +1867,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
int ret;
do {
ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
vbl->request.type &= ~DRM_VBLANK_RELATIVE;
} while (ret && errno == EINTR);
@ -1904,7 +1917,7 @@ int drmCtlInstHandler(int fd, int irq)
ctl.func = DRM_INST_HANDLER;
ctl.irq = irq;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@ -1927,7 +1940,7 @@ int drmCtlUninstHandler(int fd)
ctl.func = DRM_UNINST_HANDLER;
ctl.irq = 0;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@ -1944,7 +1957,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
return -errno;
return 0;
}
@ -1970,7 +1983,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
p.busnum = busnum;
p.devnum = devnum;
p.funcnum = funcnum;
if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
return -errno;
return p.irq;
}
@ -2012,7 +2025,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
map.handle = (void *)handle;
if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
return -errno;
return 0;
}
@ -2024,7 +2037,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
@ -2039,7 +2052,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
drm_map_t map;
map.offset = idx;
if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
return -errno;
*offset = map.offset;
*size = map.size;
@ -2056,7 +2069,7 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
drm_client_t client;
client.idx = idx;
if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
return -errno;
*auth = client.auth;
*pid = client.pid;
@ -2071,7 +2084,7 @@ int drmGetStats(int fd, drmStatsT *stats)
drm_stats_t s;
int i;
if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
return -errno;
stats->count = 0;
@ -2213,7 +2226,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
sv.drm_dd_major = version->drm_dd_major;
sv.drm_dd_minor = version->drm_dd_minor;
if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
retcode = -errno;
}
@ -2244,7 +2257,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
if (ioctl(fd, request, data)) {
if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@ -2273,7 +2286,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
if (ioctl(fd, request, data)) {
if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@ -2302,7 +2315,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
if (ioctl(fd, request, data)) {
if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@ -2331,9 +2344,8 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
if (ioctl(fd, request, data)) {
if (drmIoctl(fd, request, data))
return -errno;
}
return 0;
}
@ -2355,7 +2367,7 @@ int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
arg.type = type;
arg.fence_class = fence_class;
if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@ -2379,7 +2391,7 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc
arg.flags = flags;
arg.fence_class = fence_class;
if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@ -2397,7 +2409,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = handle;
if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@ -2414,7 +2426,7 @@ int drmFenceUnreference(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
return -errno;
return 0;
}
@ -2427,7 +2439,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
arg.handle = fence->handle;
arg.type = flush_type;
if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@ -2442,7 +2454,7 @@ int drmFenceUpdate(int fd, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@ -2482,7 +2494,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
arg.handle = fence->handle;
arg.type = emit_type;
if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
if (drmIoctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@ -2520,7 +2532,7 @@ drmIoctlTimeout(int fd, unsigned long request, void *argp)
int ret;
do {
ret = ioctl(fd, request, argp);
ret = drmIoctl(fd, request, argp);
if (ret != 0 && errno == EAGAIN) {
if (!haveThen) {
gettimeofday(&then, NULL);
@ -2630,7 +2642,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
memset(&arg, 0, sizeof(arg));
req->handle = handle;
if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
if (drmIoctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
return -errno;
drmBOCopyReply(rep, buf);
@ -2654,7 +2666,7 @@ int drmBOUnreference(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
arg.handle = buf->handle;
if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
if (drmIoctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
return -errno;
buf->handle = 0;
@ -2724,7 +2736,7 @@ int drmBOUnmap(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
arg.handle = buf->handle;
if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
if (drmIoctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
return -errno;
}
buf->mapCount--;
@ -2770,7 +2782,7 @@ int drmBOInfo(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
ret = drmIoctl(fd, DRM_IOCTL_BO_INFO, &arg);
if (ret)
return -errno;
@ -2825,7 +2837,7 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
arg.p_size = pSize;
arg.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
if (drmIoctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
@ -2837,7 +2849,7 @@ int drmMMTakedown(int fd, unsigned memType)
memset(&arg, 0, sizeof(arg));
arg.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
if (drmIoctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
return -errno;
return 0;
}
@ -2879,7 +2891,7 @@ int drmMMInfo(int fd, unsigned memType, uint64_t *size)
arg.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg))
if (drmIoctl(fd, DRM_IOCTL_MM_INFO, &arg))
return -errno;
*size = arg.p_size;
@ -2894,7 +2906,7 @@ int drmBOVersion(int fd, unsigned int *major,
int ret;
memset(&arg, 0, sizeof(arg));
ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
ret = drmIoctl(fd, DRM_IOCTL_BO_VERSION, &arg);
if (ret)
return -errno;

View File

@ -657,6 +657,7 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
extern void drmMsg(const char *format, ...);
#include "xf86mm.h"

View File

@ -94,6 +94,18 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
for ((__item) = (__list)->next, (__temp) = (__item)->next; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->next)
#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->prev)
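These new DRMLISTFOREACHSAFE variants cache the successor (or predecessor) in __temp before the loop body runs, so the current entry can be unlinked and freed mid-iteration. A hypothetical sketch; the node type and the DRMINITLISTHEAD/DRMLISTDEL helpers are assumptions based on the rest of this header:

/* Illustration only: free every node of a list while walking it. */
struct node {
	drmMMListHead link;
	int payload;
};
drmMMListHead list;		/* assume DRMINITLISTHEAD(&list) plus inserts */
drmMMListHead *entry, *temp;
DRMLISTFOREACHSAFE(entry, temp, &list) {
	struct node *n = DRMLISTENTRY(struct node, entry, link);
	DRMLISTDEL(entry);	/* safe: temp already holds entry->next */
	free(n);
}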
typedef struct _drmFence
{
unsigned handle;

View File

@ -116,7 +116,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
ifeq ($(V),"$(RUNNING_REL)")
HEADERFROMBOOT := 1
GETCONFIG := MAKEFILES=$(shell pwd)/.config
GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
HAVECONFIG := y
endif
@ -163,7 +163,7 @@ endif
all: modules
modules: includes
+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
ifeq ($(HEADERFROMBOOT),1)
@ -239,7 +239,7 @@ drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
else

View File

@ -14,14 +14,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
drm_regman.o drm_vm_nopage_compat.o
drm_regman.o drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
i915_opregion.o
i915_opregion.o \
i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \

805
linux-core/drm-gem.txt Normal file
View File

@ -0,0 +1,805 @@
The Graphics Execution Manager
Part of the Direct Rendering Manager
==============================
Keith Packard <keithp@keithp.com>
Eric Anholt <eric@anholt.net>
2008-5-9
Contents:
1. Graphics Execution Manager Overview
2. API Overview and Conventions
3. Object Creation and Destruction
4. Basic read/write operations
5. Mapping objects to user space
6. Memory Domains
7. Execution (Intel specific)
8. Other misc Intel-specific functions
1. Graphics Execution Manager Overview
Gem is designed to manage graphics memory, control access to the graphics
device execution context and handle the essentially NUMA environment unique
to modern graphics hardware. Gem allows multiple applications to share
graphics device resources without the need to constantly reload the entire
graphics card. Data may be shared between multiple applications with gem
ensuring that the correct memory synchronization occurs.
Graphics data can consume arbitrary amounts of memory, with 3D applications
constructing ever larger sets of textures and vertices. With graphics card
memory growing larger every year and graphics APIs growing more
complex, we can no longer insist that each application save a complete copy
of their graphics state so that the card can be re-initialized from user
space at each context switch. Ensuring that graphics data remains persistent
across context switches allows applications significant new functionality
while also improving performance for existing APIs.
Modern Linux desktops include significant 3D rendering as a fundamental
component of the desktop image construction process. 2D and 3D applications
paint their content to offscreen storage and the central 'compositing
manager' constructs the final screen image from those window contents. This
means that pixel image data from these applications must move within reach
of the compositing manager and be used as source operands for screen image
rendering operations.
Gem provides simple mechanisms to manage graphics data and control execution
flow within the Linux operating system. Using many existing kernel
subsystems, it does this with a modest amount of code.
2. API Overview and Conventions
All APIs here are defined in terms of ioctls applied to the DRM file
descriptor. To create and manipulate objects, an application must be
'authorized' using the DRI or DRI2 protocols with the X server. To relax
that, we will need to implement some better access control mechanisms within
the hardware portion of the driver to prevent inappropriate
cross-application data access.
Any DRM driver which does not support GEM will return -ENODEV for all of
these ioctls. Invalid object handles return -EINVAL. Invalid object names
return -ENOENT. Other errors are as documented in the specific API below.
To avoid the need to translate ioctl contents on mixed-size systems (with
32-bit user space running on a 64-bit kernel), the ioctl data structures
contain explicitly sized objects, using 64-bits for all size and pointer
data and 32-bits for identifiers. In addition, the 64-bit objects are all
carefully aligned on 64-bit boundaries. Because of this, all pointers in the
ioctl data structures are passed as uint64_t values. Suitable casts will
be necessary.
One significant operation which is explicitly left out of this API is object
locking. Applications are expected to perform locking of shared objects
outside of the GEM API. This kind of locking is not necessary to safely
manipulate the graphics engine, and with multiple objects interacting in
unknown ways, per-object locking would likely introduce all kinds of
lock-order issues. Punting this to the application seems like the only
sensible plan. Given that DRM already offers a global lock on the hardware,
this doesn't change the current situation.
3. Object Creation and Destruction
Gem provides explicit memory management primitives. System pages are
allocated when the object is created, either as the fundamental storage for
hardware where system memory is used by the graphics processor directly, or
as backing store for graphics-processor resident memory.
Objects are referenced from user space using handles. These are, for all
intents and purposes, equivalent to file descriptors. We could simply use
file descriptors were it not for the small limit (1024) of file descriptors
available to applications, and for the fact that the X server (a rather
significant user of this API) uses 'select' and has a limited maximum file
descriptor for that operation. Given the ability to allocate more file
descriptors, and given the ability to place these 'higher' in the file
descriptor space, we'd love to simply use file descriptors.
Objects may be published with a name so that other applications can access
them. The name remains valid as long as the object exists. Right now, our
DRI APIs use 32-bit integer names, so that's what we expose here.
A. Creation
struct drm_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object
* will be returned.
*/
uint64_t size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
uint32_t handle;
uint32_t pad;
};
/* usage */
create.size = 16384;
ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create);
if (ret == 0)
return create.handle;
Note that the size is rounded up to a page boundary, and that
the rounded-up size is returned in 'size'. No name is assigned to
this object, making it local to this process.
If insufficient memory is available, -ENOMEM will be returned.
B. Closing
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
uint32_t pad;
};
/* usage */
close.handle = <handle>;
ret = ioctl (fd, DRM_IOCTL_GEM_CLOSE, &close);
This call makes the specified handle invalid, and if no other
applications are using the object, any necessary graphics hardware
synchronization is performed and the resources used by the object
released.
C. Naming
struct drm_gem_flink {
/** Handle for the object being named */
uint32_t handle;
/** Returned global name */
uint32_t name;
};
/* usage */
flink.handle = <handle>;
ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink);
if (ret == 0)
return flink.name;
Flink creates a name for the object and returns it to the
application. This name can be used by other applications to gain
access to the same object.
D. Opening by name
struct drm_gem_open {
/** Name of object being opened */
uint32_t name;
/** Returned handle for the object */
uint32_t handle;
/** Returned size of the object */
uint64_t size;
};
/* usage */
open.name = <name>;
ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open);
if (ret == 0) {
*sizep = open.size;
return open.handle;
}
Open accesses an existing object and returns a handle for it. If the
object doesn't exist, -ENOENT is returned. The size of the object is
also returned. This handle has all the same capabilities as the
handle used to create the object. In particular, the object is not
destroyed until all handles are closed.
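Putting C and D together, a hypothetical two-process sharing sequence (error handling omitted) might look like:

/* process A: create an object and publish its name */
create.size = 16384;
ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create);
flink.handle = create.handle;
ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink);
/* hand flink.name to process B, e.g. via the X server */

/* process B: open the shared object by name */
open.name = <name from process A>;
ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open);
/* open.handle now references the same object; open.size is its size */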
4. Basic read/write operations
By default, gem objects are not mapped to the application's address space;
getting data in and out of them is done with I/O operations instead. This
allows the data to reside in otherwise unmapped pages, including pages in
video memory on an attached discrete graphics card. In addition, using
explicit I/O operations allows better control over cache contents; as
graphics devices are generally not cache coherent with the CPU, mapping
pages used for graphics into an application address space requires
expensive cache flushing operations. Providing direct control over
graphics data access ensures that data are handled in the most efficient
possible fashion.
A. Reading
struct drm_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to read from */
uint64_t offset;
/** Length of data to read */
uint64_t size;
/** Pointer to write the data into. */
uint64_t data_ptr; /* void * */
};
This copies data out of the specified object at the specified
position into the supplied user memory. Any necessary graphics
device synchronization and flushing will be done automatically.
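A usage sketch in the style of the examples above; the request name DRM_IOCTL_GEM_PREAD is assumed here by analogy with the other ioctls, and 'buffer' is a caller-supplied destination:

/* usage (illustrative) */
pread.handle = <handle>;
pread.offset = <offset>;
pread.size = <size>;
pread.data_ptr = (uint64_t) (uintptr_t) buffer;
ret = ioctl (fd, DRM_IOCTL_GEM_PREAD, &pread);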
B. Writing
struct drm_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to write to */
uint64_t offset;
/** Length of data to write */
uint64_t size;
/** Pointer to read the data from. */
uint64_t data_ptr; /* void * */
};
This copies data from user memory into the specified object
at the specified position. Again, device synchronization will
be handled by the kernel to ensure user space sees a
consistent view of the graphics device.
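Again a sketch, with DRM_IOCTL_GEM_PWRITE assumed by analogy with the other ioctl names and 'buffer' holding the caller's source data:

/* usage (illustrative) */
pwrite.handle = <handle>;
pwrite.offset = <offset>;
pwrite.size = <size>;
pwrite.data_ptr = (uint64_t) (uintptr_t) buffer;
ret = ioctl (fd, DRM_IOCTL_GEM_PWRITE, &pwrite);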
5. Mapping objects to user space
For most objects, reading/writing is the preferred interaction mode.
However, when the CPU is involved in rendering to cover deficiencies in
hardware support for particular operations, the application will want to
access the relevant objects directly.
Because mmap is fairly heavyweight, we allow applications to retain maps to
objects persistently and then update how they're using the memory through a
separate interface. Applications which fail to use this separate interface
may exhibit unpredictable behaviour as memory consistency will not be
preserved.
A. Mapping
struct drm_gem_mmap {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
/** Offset in the object to map. */
uint64_t offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
uint64_t size;
/** Returned pointer the data was mapped at */
uint64_t addr_ptr; /* void * */
};
/* usage */
mmap.handle = <handle>;
mmap.offset = <offset>;
mmap.size = <size>;
ret = ioctl (fd, DRM_IOCTL_GEM_MMAP, &mmap);
if (ret == 0)
return (void *) (uintptr_t) mmap.addr_ptr;
B. Unmapping
munmap (addr, length);
Nothing strange here; just use the normal munmap syscall.
6. Memory Domains
Graphics devices remain a strong bastion of non cache-coherent memory. As a
result, accessing data through one functional unit will end up loading that
cache with data which then needs to be manually synchronized when that data
is used with another functional unit.
Tracking where data are resident is done by identifying how functional units
deal with caches. Each cache is labeled as a separate memory domain. Then,
each sequence of operations is expected to load data into various read
domains and leave data in at most one write domain. Gem tracks the read and
write memory domains of each object and performs the necessary
synchronization operations when objects move from one domain set to another.
For example, if operation 'A' constructs an image that is immediately used
by operation 'B', and the read domain for 'B' is not the same as the
write domain for 'A', then the write domain must be flushed and the read
domain invalidated. If these two operations are both executed in the same
command queue, then the flush operation can go in between them in that
queue, avoiding any kind of CPU-based synchronization and leaving the GPU to
do the work itself.
6.1 Memory Domains (GPU-independent)
* DRM_GEM_DOMAIN_CPU.
Objects in this domain are using caches which are connected to the CPU.
Moving objects from non-CPU domains into the CPU domain can involve waiting
for the GPU to finish with operations using this object. Moving objects
from this domain to a GPU domain can involve flushing CPU caches and chipset
buffers.
6.2 GPU-independent memory domain ioctl
This ioctl is independent of the GPU in use. So far, no use other than
synchronizing objects to the CPU domain has been found; if that turns out
to be generally true, this ioctl may be simplified further.
A. Explicit domain control
struct drm_gem_set_domain {
/** Handle for the object */
uint32_t handle;
/** New read domains */
uint32_t read_domains;
/** New write domain */
uint32_t write_domain;
};
/* usage */
set_domain.handle = <handle>;
set_domain.read_domains = <read_domains>;
set_domain.write_domain = <write_domain>;
ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
When the application wants to explicitly manage memory domains for
an object, it can use this function. Usually, this is only used
when the application wants to synchronize object contents between
the GPU and CPU-based application rendering. In that case,
the <read_domains> would be set to DRM_GEM_DOMAIN_CPU, and if the
application were going to write to the object, the <write_domain>
would also be set to DRM_GEM_DOMAIN_CPU. After the call, gem
guarantees that all previous rendering operations involving this
object are complete. The application is then free to access the
object through the address returned by the mmap call. Afterwards,
when the application again uses the object through the GPU, any
necessary CPU flushing will occur and the object will be correctly
synchronized with the GPU.
Note that this synchronization is not required for any accesses
going through the driver itself. The pread, pwrite and execbuffer
ioctls all perform the necessary domain management internally.
Explicit synchronization is only necessary when accessing the object
through the mmap'd address.
7. Execution (Intel specific)
Managing the command buffers is inherently chip-specific, so the core of gem
doesn't have any intrinsic functions. Rather, execution is left to the
device-specific portions of the driver.
The Intel DRM_I915_GEM_EXECBUFFER ioctl takes a list of gem objects, all of
which are mapped to the graphics device. The last object in the list is the
command buffer.
7.1. Relocations
Command buffers often refer to other objects, and to allow the kernel driver
to move objects around, a sequence of relocations is associated with each
object. Device-specific relocation operations are used to place the
target-object relative value into the object.
The Intel driver has a single relocation type:
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this
* relocation entry.
*
* It's appealing to make this be an index into the
* mm_validate_entry list to refer to the buffer,
* but this allows the driver to create a relocation
* list for state buffers and not re-write it per
* exec using the buffer.
*/
uint32_t target_handle;
/**
* Value to be added to the offset of the target
* buffer to make up the relocation entry.
*/
uint32_t delta;
/**
* Offset in the buffer the relocation entry will be
* written into
*/
uint64_t offset;
/**
* Offset value of the target buffer that the
* relocation entry was last written as.
*
* If the buffer has the same offset as last time, we
* can skip syncing and writing the relocation. This
* value is written back out by the execbuffer ioctl
* when the relocation is written.
*/
uint64_t presumed_offset;
/**
* Target memory domains read by this operation.
*/
uint32_t read_domains;
/*
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the
* whole execbuffer operation, so that where there are
* conflicts, the application will get -EINVAL back.
*/
uint32_t write_domain;
};
'target_handle' is the handle of the target object. This object
must be one of the objects listed in the execbuffer request, or
bad things will happen. The kernel doesn't check for this.
'offset' is where, in the source object, the relocation data
are written. Each relocation value is a 32-bit value consisting
of the location of the target object in the GPU memory space plus
the 'delta' value included in the relocation.
'presumed_offset' is where user-space believes the target object
lies in GPU memory space. If this value matches where the object
actually is, then no relocation data are written; the kernel
assumes that user space has set up data in the source object
using this presumption. This offers a fairly important optimization
as writing relocation data requires mapping of the source object
into the kernel memory space.
'read_domains' and 'write_domain' list the usage by the source
object of the target object. The kernel unions all of the domain
information from all relocations in the execbuffer request. No more
than one write_domain is allowed; otherwise, an -EINVAL error is
returned. read_domains must contain write_domain. This domain
information is used to synchronize buffer contents as described
above in the section on domains.
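For illustration, a relocation for a render-target pointer stored 64 bytes into a batch buffer might be filled in as follows; the handle and presumed offset come from the caller's own bookkeeping:

/* usage (illustrative) */
reloc.target_handle = <target handle>;
reloc.delta = 0;
reloc.offset = 64;
reloc.presumed_offset = <last known offset, or 0>;
reloc.read_domains = DRM_GEM_DOMAIN_I915_RENDER;
reloc.write_domain = DRM_GEM_DOMAIN_I915_RENDER;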
7.1.1 Memory Domains (Intel specific)
The Intel GPU has several internal caches which are not coherent and hence
require explicit synchronization. Memory domains provide the necessary data
to synchronize what is needed while leaving other cache contents intact.
* DRM_GEM_DOMAIN_I915_RENDER.
The GPU 3D and 2D rendering operations use a unified rendering cache, so
operations doing 3D painting and 2D blits will use this domain.
* DRM_GEM_DOMAIN_I915_SAMPLER
Textures are loaded by the sampler through a separate cache, so
any texture reading will use this domain. Note that the sampler
and renderer use different caches, so moving an object from render target
to texture source will require a domain transfer.
* DRM_GEM_DOMAIN_I915_COMMAND
The command buffer doesn't have an explicit cache (although it does
read ahead quite a bit), so this domain just indicates that the object
needs to be flushed to the GPU.
* DRM_GEM_DOMAIN_I915_INSTRUCTION
All of the programs on Gen4 and later chips use an instruction cache to
speed program execution. It must be explicitly flushed when new programs
are written to memory by the CPU.
* DRM_GEM_DOMAIN_I915_VERTEX
Vertex data uses two different vertex caches, but they're
both flushed with the same instruction.
7.2 Execution object list (Intel specific)
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT
* for this operation.
*/
uint32_t handle;
/**
* List of relocations to be performed on this buffer
*/
uint32_t relocation_count;
/* struct drm_i915_gem_relocation_entry *relocs */
uint64_t relocs_ptr;
/**
* Required alignment in graphics aperture
*/
uint64_t alignment;
/**
* Returned value of the updated offset of the object,
* for future presumed_offset writes.
*/
uint64_t offset;
};
Each object involved in a particular execution operation must be
listed using one of these structures.
'handle' references the object.
'relocs_ptr' is a user-mode pointer to an array of 'relocation_count'
drm_i915_gem_relocation_entry structs (see above) that
define the relocations necessary in this buffer. Note that all
relocations must reference other exec_object structures in the same
execbuffer ioctl and that those other buffers must come earlier in
the exec_object array. In other words, the dependencies mapped by the
exec_object relocations must form a directed acyclic graph.
'alignment' is the byte alignment necessary for this buffer. Each
object has specific alignment requirements; as the kernel doesn't
know what each object is being used for, those requirements must be
provided by user mode. If an object is used in two different ways,
it's quite possible that the alignment requirements will differ.
'offset' is a return value, receiving the location of the object
during this execbuffer operation. The application should use this
as the presumed offset in future operations; if the object does not
move, then the kernel need not write relocation data.
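A hypothetical entry for a buffer carrying two relocations could be set up like this:

/* usage (illustrative) */
exec_object.handle = <handle>;
exec_object.relocation_count = 2;
exec_object.relocs_ptr = (uint64_t) (uintptr_t) relocs;
exec_object.alignment = 4096;
/* exec_object.offset is written back by the kernel */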
7.3 Execbuffer ioctl (Intel specific)
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their
relocations to be performed on them.
*
* These buffers must be listed in an order such that
* all relocations a buffer is performing refer to
* buffers that have already appeared in the validate
* list.
*/
/* struct drm_i915_gem_validate_entry *buffers */
uint64_t buffers_ptr;
uint32_t buffer_count;
/**
* Offset in the batchbuffer to start execution from.
*/
uint32_t batch_start_offset;
/**
* Bytes used in batchbuffer from batch_start_offset
*/
uint32_t batch_len;
uint32_t DR1;
uint32_t DR4;
uint32_t num_cliprects;
uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
};
'buffers_ptr' is a user-mode pointer to an array of 'buffer_count'
drm_i915_gem_exec_object structures which contains the complete set
of objects required for this execbuffer operation. The last entry in
this array, the 'batch buffer', is the buffer of commands which will
be linked to the ring and executed.
'batch_start_offset' is the byte offset within the batch buffer which
contains the first command to execute. So far, we haven't found a
reason to use anything other than '0' here, but the thought was that
some space might be allocated for additional initialization which
could be skipped in some cases. This must be a multiple of 4.
'batch_len' is the length, in bytes, of the data to be executed
(i.e., the amount of data after batch_start_offset). This must
be a multiple of 4.
'num_cliprects' and 'cliprects_ptr' reference an array of
drm_clip_rect structures that is num_cliprects long. The entire
batch buffer will be executed multiple times, once for each
rectangle in this list. If num_cliprects is 0, then no clipping
rectangle will be set.
'DR1' and 'DR4' are portions of the 3DSTATE_DRAWING_RECTANGLE
command which will be queued when this operation is clipped
(num_cliprects != 0).
DR1 bit definition
31 Fast Scissor Clip Disable (debug only).
Disables a hardware optimization that
improves performance. This should have
no visible effect, other than reducing
performance.
30 Depth Buffer Coordinate Offset Disable.
This disables the addition of the
depth buffer offset bits which are used
to change the location of the depth buffer
relative to the front buffer.
27:26 X Dither Offset. Specifies the X pixel
offset to use when accessing the dither table
25:24 Y Dither Offset. Specifies the Y pixel
offset to use when accessing the dither
table.
DR4 bit definition
31:16 Drawing Rectangle Origin Y. Specifies the Y
origin of coordinates relative to the
draw buffer.
15:0 Drawing Rectangle Origin X. Specifies the X
origin of coordinates relative to the
draw buffer.
As you can see, these two fields are necessary for correctly
offsetting drawing within a buffer which contains multiple surfaces.
Note that DR1 is only used on Gen3 and earlier hardware and that
newer hardware sticks the dither offset elsewhere.
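Tying the fields together, an unclipped submission might be issued like this; the request name DRM_IOCTL_I915_GEM_EXECBUFFER is assumed from the ioctl named at the start of this section:

/* usage (illustrative, no cliprects) */
execbuffer.buffers_ptr = (uint64_t) (uintptr_t) exec_objects;
execbuffer.buffer_count = <count>;	/* batch buffer last */
execbuffer.batch_start_offset = 0;
execbuffer.batch_len = <command bytes, multiple of 4>;
execbuffer.num_cliprects = 0;
execbuffer.cliprects_ptr = 0;
execbuffer.DR1 = 0;
execbuffer.DR4 = 0;
ret = ioctl (fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuffer);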
7.3.1 Detailed Execution Description
Execution of a single batch buffer requires several preparatory
steps to make the objects visible to the graphics engine and resolve
relocations to account for their current addresses.
A. Mapping and Relocation
Each exec_object structure in the array is examined in turn.
If the object is not already bound to the GTT, it is assigned a
location in the graphics address space. If no space is available in
the GTT, some other object will be evicted. This may require waiting
for previous execbuffer requests to complete before that object can
be unmapped. With the location assigned, the pages for the object
are pinned in memory using find_or_create_page and the GTT entries
updated to point at the relevant pages using drm_agp_bind_pages.
Then the array of relocations is traversed. Each relocation record
looks up the target object and, if the presumed offset does not
match the current offset (remember that this buffer has already been
assigned an address as it must have been mapped earlier), the
relocation value is computed using the current offset. If the
object is currently in use by the graphics engine, writing the data
out must be preceded by a delay while the object is still busy.
Once it is idle, then the page containing the relocation is mapped
by the CPU and the updated relocation data written out.
The read_domains and write_domain entries in each relocation are
used to compute the new read_domains and write_domain values for the
target buffers. The actual execution of the domain changes must wait
until all of the exec_object entries have been evaluated as the
complete set of domain information will not be available until then.
B. Memory Domain Resolution
After all of the new memory domain data has been pulled out of the
relocations and computed for each object, the list of objects is
again traversed and the new memory domains compared against the
current memory domains. There are two basic operations involved here:
* Flushing the current write domain. If the new read domains
are not equal to the current write domain, then the current
write domain must be flushed. Otherwise, reads will not see data
present in the write domain cache. In addition, any new read domains
other than the current write domain must be invalidated to ensure
that the flushed data are re-read into their caches.
* Invalidating new read domains. Any domains which were not currently
used for this object must be invalidated as old objects which
were mapped at the same location may have stale data in the new
domain caches.
If the CPU cache is being invalidated and some GPU cache is being
flushed, then we'll have to wait for rendering to complete so that
any pending GPU writes will be complete before we flush the GPU
cache.
If the CPU cache is being flushed, then we use 'clflush' to get data
written from the CPU.
Because the GPU caches cannot be partially flushed or invalidated,
we don't actually flush them during this traversal stage. Rather, we
gather the invalidate and flush bits up in the device structure.
Once all of the object domain changes have been evaluated, then the
gathered invalidate and flush bits are examined. For any GPU flush
operations, we emit a single MI_FLUSH command that performs all of
the necessary flushes. We then look to see if the CPU cache was
flushed. If so, we use the chipset flush magic (writing to a special
page) to get the data out of the chipset and into memory.
C. Queuing Batch Buffer to the Ring
With all of the objects resident in graphics memory space, and all
of the caches prepared with appropriate data, the batch buffer
object can be queued to the ring. If there are clip rectangles, then
the buffer is queued once per rectangle, with suitable clipping
inserted into the ring just before the batch buffer.
D. Creating an IRQ Cookie
Right after the batch buffer is placed in the ring, a request to
generate an IRQ is added to the ring along with a command to write a
marker into memory. When the IRQ fires, the driver can look at the
memory location to see where in the ring the GPU has passed. This
magic cookie value is stored in each object used in this execbuffer
command; it is used wherever you saw 'wait for rendering' above in
this document.
E. Writing back the new object offsets
So that the application has a better idea what to use for
'presumed_offset' values later, the current object offsets are
written back to the exec_object structures.
8. Other misc Intel-specific functions.
To complete the driver, a few other functions were necessary.
8.1 Initialization from the X server
As the X server is currently responsible for apportioning memory between 2D
and 3D, it must tell the kernel which region of the GTT aperture is
available for 3D objects to be mapped into.
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the
* DRM memory manager.
*/
uint64_t gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM
* memory manager.
*/
uint64_t gtt_end;
};
/* usage */
init.gtt_start = <gtt_start>;
init.gtt_end = <gtt_end>;
ret = ioctl (fd, DRM_IOCTL_I915_GEM_INIT, &init);
The GTT aperture between gtt_start and gtt_end will be used to map
objects. This also tells the kernel that the ring can be used,
pulling the ring addresses from the device registers.
8.2 Pinning objects in the GTT
For scan-out buffers and the current shared depth and back buffers, we need
to have them always available in the GTT, at least for now. Pinning locks
their pages in memory and keeps them at a fixed offset in the
graphics aperture. These operations are available only to root.
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
uint32_t handle;
uint32_t pad;
/** alignment required within the aperture */
uint64_t alignment;
/** Returned GTT offset of the buffer. */
uint64_t offset;
};
/* usage */
pin.handle = <handle>;
pin.alignment = <alignment>;
ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin);
if (ret == 0)
return pin.offset;
Pinning an object ensures that it will not be evicted from the GTT
or moved. It will stay resident until destroyed or unpinned.
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
uint32_t handle;
uint32_t pad;
};
/* usage */
unpin.handle = <handle>;
ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
Unpinning an object makes it possible to evict this object from the
GTT. It doesn't ensure that it will be evicted, just that it may.

View File

@ -54,6 +54,7 @@
#include <linux/smp_lock.h> /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/kref.h>
#include <linux/pagemap.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
#include <linux/mutex.h>
@ -89,6 +90,10 @@
struct drm_device;
struct drm_file;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
typedef unsigned long uintptr_t;
#endif
/* If you want the memory alloc debug functionality, change define below */
/* #define DEBUG_MEMORY */
@ -107,7 +112,7 @@ struct drm_file;
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
#define DRIVER_GEM 0x400
/*@}*/
@ -427,6 +432,11 @@ struct drm_file {
struct list_head refd_objects;
/** Mapping of mm object handles to object pointers. */
struct idr object_idr;
/** Lock for synchronization of access to object_idr. */
spinlock_t table_lock;
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;
@ -604,6 +614,56 @@ struct drm_ati_pcigart_info {
int table_size;
};
/**
* This structure defines a generic GEM memory object, which is used by the
* DRM for its buffer objects.
*/
struct drm_gem_object {
/** Reference count of this object */
struct kref refcount;
/** Handle count of this object. Each handle also holds a reference */
struct kref handlecount;
/** Related drm device */
struct drm_device *dev;
/** File representing the shmem storage */
struct file *filp;
/**
* Size of the object, in bytes. Immutable over the object's
* lifetime.
*/
size_t size;
/**
* Global name for this object, starts at 1. 0 means unnamed.
* Access is covered by the object_name_lock in the related drm_device
*/
int name;
/**
* Memory domains. These monitor which caches contain read/write data
* related to the object. When transitioning from one set of domains
* to another, the driver is called to ensure that caches are suitably
* flushed and invalidated
*/
uint32_t read_domains;
uint32_t write_domain;
/**
* While validating an exec operation, the
* new read/write domain values are computed here.
* They will be transferred to the above values
* at the point that any cache flushing occurs
*/
uint32_t pending_read_domains;
uint32_t pending_write_domain;
void *driver_private;
};
#include "drm_objects.h"
/**
@ -705,6 +765,18 @@ struct drm_driver {
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
/**
* Driver-specific constructor for drm_gem_objects, to set up
* obj->driver_private.
*
* Returns 0 on success.
*/
int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@ -892,6 +964,21 @@ struct drm_device {
spinlock_t drw_lock;
struct idr drw_idr;
/*@} */
/** \name GEM information */
/*@{ */
spinlock_t object_name_lock;
struct idr object_name_idr;
atomic_t object_count;
atomic_t object_memory;
atomic_t pin_count;
atomic_t pin_memory;
atomic_t gtt_count;
atomic_t gtt_memory;
uint32_t gtt_total;
uint32_t invalidate_domains; /* domains pending invalidation */
uint32_t flush_domains; /* domains pending flush */
/*@} */
};
#if __OS_HAS_AGP
@ -1007,6 +1094,10 @@ extern void drm_free_pages(unsigned long address, int order, int area);
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
@ -1200,7 +1291,7 @@ extern void drm_agp_chipset_flush(struct drm_device *dev);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern int drm_put_minor(struct drm_device *dev);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern struct class *drm_class;
@ -1260,6 +1351,70 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
/* Graphics Execution Manager library functions (drm_gem.c) */
int
drm_gem_init (struct drm_device *dev);
void
drm_gem_object_free (struct kref *kref);
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size);
void
drm_gem_object_handle_free (struct kref *kref);
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
kref_get(&obj->refcount);
}
static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
{
if (obj == NULL)
return;
kref_put (&obj->refcount, drm_gem_object_free);
}
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep);
static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
{
drm_gem_object_reference (obj);
kref_get(&obj->handlecount);
}
static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
{
if (obj == NULL)
return;
/*
* Must drop handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
kref_put (&obj->handlecount, drm_gem_object_handle_free);
drm_gem_object_unreference (obj);
}
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
int handle);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);

View File

@ -484,7 +484,50 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset)
{
DRM_AGP_MEM *mem;
int ret, i;
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
#else
mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
AGP_USER_MEMORY);
#endif
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
num_pages);
return NULL;
}
for (i = 0; i < num_pages; i++)
mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
mem->page_count = num_pages;
mem->is_flushed = TRUE;
ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
return NULL;
}
return mem;
}
EXPORT_SYMBOL(drm_agp_bind_pages);
/*
* AGP ttm backend interface.

View File

@ -332,7 +332,7 @@ typedef _Bool bool;
#endif
#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
pgprot_t protection);

View File

@ -150,6 +150,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@ -418,7 +422,7 @@ static void drm_cleanup(struct drm_device * dev)
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
drm_put_minor(&dev->primary);
drm_put_minor(dev);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}

View File

@ -274,6 +274,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@ -444,6 +447,9 @@ int drm_release(struct inode *inode, struct file *filp)
dev->driver->reclaim_buffers(dev, file_priv);
}
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);

420
linux-core/drm_gem.c Normal file
View File

@ -0,0 +1,420 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"
/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
*
* Because various devices have different requirements in terms of
* synchronization and migration strategies, implementing that is left up to
* the driver, and all that the general API provides should be generic --
* allocating objects, reading/writing data with the CPU, freeing objects.
* Even there, platform-dependent optimizations for reading/writing data with
* the CPU mean we'll likely hook those out to driver-specific calls. However,
* the DRI2 implementation wants to have at least allocate/mmap be generic.
*
* The goal was to have swap-backed object allocation managed through
* struct file. However, file descriptors as handles to a struct file have
* two major failings:
* - Process limits prevent more than 1024 or so being used at a time by
* default.
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
* This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
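/*
* Editorial illustration (not part of this change): since handles mimic
* file descriptors through ioctls, userspace "closes" a buffer object with
* DRM_IOCTL_GEM_CLOSE (defined in shared-core/drm.h below). A hedged
* sketch, assuming an fd opened on the DRM device and a handle obtained
* from a driver-specific create ioctl:
*
*	struct drm_gem_close close_arg = { .handle = handle };
*
*	if (ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg) != 0)
*		perror("DRM_IOCTL_GEM_CLOSE");
*/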
/**
* Initialize the GEM device fields
*/
int
drm_gem_init(struct drm_device *dev)
{
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
atomic_set(&dev->object_count, 0);
atomic_set(&dev->object_memory, 0);
atomic_set(&dev->pin_count, 0);
atomic_set(&dev->pin_memory, 0);
atomic_set(&dev->gtt_count, 0);
atomic_set(&dev->gtt_memory, 0);
return 0;
}
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
return NULL;
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, 0);
if (IS_ERR(obj->filp)) {
kfree(obj);
return NULL;
}
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
fput(obj->filp);
kfree(obj);
return NULL;
}
atomic_inc(&dev->object_count);
atomic_add(obj->size, &dev->object_memory);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
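/*
* Editorial illustration (not part of this change): a driver's create
* ioctl would typically pair drm_gem_object_alloc() with
* drm_gem_handle_create() and then drop its own reference, since the new
* handle keeps the object alive. A sketch assuming a page-aligned
* args->size and the hypothetical names args/handle:
*
*	obj = drm_gem_object_alloc(dev, args->size);
*	if (obj == NULL)
*		return -ENOMEM;
*	ret = drm_gem_handle_create(file_priv, obj, &handle);
*	mutex_lock(&dev->struct_mutex);
*	drm_gem_object_unreference(obj);
*	mutex_unlock(&dev->struct_mutex);
*	if (ret)
*		return ret;
*	args->handle = handle;
*/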
/**
* Removes the mapping from handle to filp for this object.
*/
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
/* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
* could race us to double-decrement the refcount and cause a
* use-after-free later. Given the frequency of our handle lookups,
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return -EINVAL;
}
dev = obj->dev;
/* Release reference and decrement refcount. */
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
mutex_lock(&dev->struct_mutex);
drm_gem_object_handle_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to drop their own reference to the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep)
{
int ret;
/*
* Get the user-visible handle using idr.
*/
again:
/* ensure there is space available to allocate a handle */
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
return -ENOMEM;
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
if (ret != 0)
return ret;
drm_gem_object_handle_reference(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
int handle)
{
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return NULL;
}
drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
* Releases the handle to an mm object.
*/
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args = data;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
ret = drm_gem_handle_delete(file_priv, args->handle);
return ret;
}
/**
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
return -ENOMEM;
spin_lock(&dev->object_name_lock);
if (obj->name) {
spin_unlock(&dev->object_name_lock);
return -EEXIST;
}
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);
spin_unlock(&dev->object_name_lock);
if (ret == -EAGAIN)
goto again;
if (ret != 0) {
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Leave the reference from the lookup around as the
* name table now holds one
*/
args->name = (uint64_t) obj->name;
return 0;
}
/**
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
int handle;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
spin_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
if (!obj)
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
args->handle = handle;
args->size = obj->size;
return 0;
}
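/*
* Editorial illustration (not part of this change): two clients of the
* same DRM device could share a buffer through the flink/open pair above.
* A hedged sketch using plain ioctl(); fd_a owns the buffer, fd_b imports
* it:
*
*	struct drm_gem_flink flink = { .handle = handle };
*	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
*
*	struct drm_gem_open op = { .name = flink.name };
*	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
*
* After the second call, op.handle and op.size describe the shared object;
* error checking is elided for brevity.
*/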
/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_init(&file_private->object_idr);
spin_lock_init(&file_private->table_lock);
}
/**
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
drm_gem_object_handle_unreference(obj);
return 0;
}
/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
idr_destroy(&file_private->object_idr);
mutex_unlock(&dev->struct_mutex);
}
/**
* Called after the last reference to the object has been lost.
*
* Frees the object.
*/
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
* Called after the last handle to the object has been closed.
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory.
*/
void
drm_gem_object_handle_free(struct kref *kref)
{
struct drm_gem_object *obj = container_of(kref,
struct drm_gem_object,
handlecount);
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
*/
drm_gem_object_unreference(obj);
} else
spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);


@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->irq;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@ -128,6 +128,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
init_timer_deferrable(&dev->vblank_disable_timer);
spin_lock_init(&dev->vbl_lock);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
@ -201,7 +202,7 @@ int drm_irq_install(struct drm_device * dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if (dev->irq == 0)
if (dev->pdev->irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@ -219,7 +220,7 @@ int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", dev->irq);
DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@ -228,7 +229,7 @@ int drm_irq_install(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
ret = request_irq(dev->irq, dev->driver->irq_handler,
ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
@ -236,6 +237,10 @@ int drm_irq_install(struct drm_device * dev)
mutex_unlock(&dev->struct_mutex);
return ret;
}
/* Expose the device irq to device drivers that want to export it for
* whatever reason.
*/
dev->irq = dev->pdev->irq;
/* After installing handler */
ret = dev->driver->irq_postinstall(dev);
@ -271,11 +276,11 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("irq=%d\n", dev->irq);
DRM_DEBUG("irq=%d\n", dev->pdev->irq);
dev->driver->irq_uninstall(dev);
free_irq(dev->irq, dev);
free_irq(dev->pdev->irq, dev);
drm_vblank_cleanup(dev);
@ -309,7 +314,7 @@ int drm_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->irq)
ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@ -514,7 +519,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret = 0;
unsigned int flags, seq, crtc;
if ((!dev->irq) || (!dev->irq_enabled))
if ((!dev->pdev->irq) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type &


@ -218,22 +218,16 @@ int drm_lock_take(struct drm_lock_data *lock_data,
} while (prev != old);
spin_unlock_bh(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
/* Warn on recursive locking of user contexts. */
if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
return !_DRM_LOCK_IS_HELD(old);
}
/**
@ -386,7 +380,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{


@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,


@ -167,6 +167,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
return child;
}
EXPORT_SYMBOL(drm_mm_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
@ -257,6 +258,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
int drm_mm_clean(struct drm_mm * mm)
{


@ -301,7 +301,12 @@ struct drm_ttm_backend_func {
void (*destroy) (struct drm_ttm_backend *backend);
};
/**
* This structure associates a set of flags and methods with a drm_ttm
* object, and will also be subclassed by the particular backend.
*
* \sa #drm_agp_ttm_backend
*/
struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;


@ -51,6 +51,10 @@ static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_objects_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@ -70,6 +74,8 @@ static struct drm_proc_list {
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"objects", drm_objects_info},
{"gem_names", drm_gem_name_info},
{"gem_objects", drm_gem_object_info},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@ -582,6 +588,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
return ret;
}
struct drm_gem_name_info_data {
int len;
char *buf;
int eof;
};
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct drm_gem_name_info_data *nid = data;
DRM_INFO("name %d size %d\n", obj->name, obj->size);
if (nid->eof)
return 0;
nid->len += sprintf(&nid->buf[nid->len],
"%6d%9d%8d%9d\n",
obj->name, obj->size,
atomic_read(&obj->handlecount.refcount),
atomic_read(&obj->refcount.refcount));
if (nid->len > DRM_PROC_LIMIT) {
nid->eof = 1;
return 0;
}
return 0;
}
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
struct drm_gem_name_info_data nid;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
nid.len = sprintf(buf, "  name     size handles refcount\n");
nid.buf = buf;
nid.eof = 0;
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
*start = &buf[offset];
*eof = 0;
if (nid.len > request + offset)
return request;
*eof = 1;
return nid.len - offset;
}
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
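/*
* Editorial note: DRM_PROC_PRINT used above is not shown in this hunk; it
* is assumed to append to buf at len and bail out once DRM_PROC_LIMIT is
* exceeded, roughly:
*
*	#define DRM_PROC_PRINT(fmt, arg...)			\
*		len += sprintf(&buf[len], fmt, ##arg);		\
*		if (len > DRM_PROC_LIMIT) {			\
*			*eof = 1;				\
*			return len - offset;			\
*		}
*
* A sketch of the existing macro's contract, not a definition added here.
*/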
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,


@ -163,7 +163,16 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
goto error_out_unreg;
}
}
drm_fence_manager_init(dev);
return 0;
error_out_unreg:
@ -213,6 +222,13 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
goto err_mem;
}
if (dev->driver->proc_init) {
ret = dev->driver->proc_init(new_minor);
if (ret) {
DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
goto err_mem;
}
}
} else
new_minor->dev_root = NULL;
@ -229,8 +245,11 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
err_g2:
if (new_minor->type == DRM_MINOR_LEGACY)
if (new_minor->type == DRM_MINOR_LEGACY) {
if (dev->driver->proc_cleanup)
dev->driver->proc_cleanup(new_minor);
drm_proc_cleanup(new_minor, drm_proc_root);
}
err_mem:
kfree(new_minor);
err_idr:
@ -293,7 +312,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
return 0;
err_g4:
drm_put_minor(&dev->primary);
drm_put_minor(dev);
err_g3:
if (!drm_fb_loaded)
pci_disable_device(pdev);
@ -349,13 +368,17 @@ int drm_put_dev(struct drm_device * dev)
* last minor released.
*
*/
int drm_put_minor(struct drm_minor **minor_p)
int drm_put_minor(struct drm_device *dev)
{
struct drm_minor **minor_p = &dev->primary;
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
if (minor->type == DRM_MINOR_LEGACY)
if (minor->type == DRM_MINOR_LEGACY) {
if (dev->driver->proc_cleanup)
dev->driver->proc_cleanup(minor);
drm_proc_cleanup(minor, drm_proc_root);
}
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);


@ -111,18 +111,22 @@ static int i915_resume(struct drm_device *dev)
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void remove(struct pci_dev *pdev);
static struct drm_driver driver = {
/* don't use MTRRs here; the X server or userspace apps should
* deal with them for Intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
@ -136,7 +140,11 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.proc_init = i915_gem_proc_init,
.proc_cleanup = i915_gem_proc_cleanup,
.ioctls = i915_ioctls,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@ -153,7 +161,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.remove = remove,
},
#ifdef I915_HAVE_FENCE
.fence_driver = &i915_fence_driver,
@ -171,7 +179,28 @@ static struct drm_driver driver = {
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_dev(pdev, ent, &driver);
int ret;
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
*/
if (pdev->device != 0x2772 && pdev->device != 0x27A2)
(void)pci_enable_msi(pdev);
ret = drm_get_dev(pdev, ent, &driver);
if (ret && pdev->msi_enabled)
pci_disable_msi(pdev);
return ret;
}
static void remove(struct pci_dev *pdev)
{
if (pdev->msi_enabled)
pci_disable_msi(pdev);
drm_cleanup_pci(pdev);
}
static int __init i915_init(void)

linux-core/i915_gem.c (new file, 2502 lines): file diff suppressed because it is too large

linux-core/i915_gem_debug.c (new file, 202 lines)

@ -0,0 +1,202 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "drm_compat.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if WATCH_INACTIVE
void
i915_verify_inactive(struct drm_device *dev, char *file, int line)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
obj = obj_priv->obj;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
obj,
obj_priv->pin_count, obj_priv->active,
obj->write_domain, file, line);
}
}
#endif /* WATCH_INACTIVE */
#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
{
uint32_t *mem = kmap_atomic(page, KM_USER0);
int i;
for (i = start; i < end; i += 4)
DRM_INFO("%08x: %08x%s\n",
(int) (bias + i), mem[i / 4],
(bias + i == mark) ? " ********" : "");
kunmap_atomic(mem, KM_USER0);
/* give syslog time to catch up */
msleep(1);
}
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
page_len = len - page * PAGE_SIZE;
if (page_len > PAGE_SIZE)
page_len = PAGE_SIZE;
for (chunk = 0; chunk < page_len; chunk += 128) {
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj_priv->page_list[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
mark);
}
}
}
#endif
#if WATCH_LRU
void
i915_dump_lru(struct drm_device *dev, const char *where)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
DRM_INFO("active list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
DRM_INFO("flushing list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
DRM_INFO("inactive %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
}
#endif
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
obj->size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
}
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
goto out;
}
for (i = 0; i < PAGE_SIZE / 4; i++) {
uint32_t cpuval = backing_map[i];
uint32_t gttval = readl(gtt_mapping +
page * 1024 + i);
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj_priv->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
DRM_INFO("...\n");
goto out;
}
}
}
kunmap_atomic(backing_map, KM_USER0);
backing_map = NULL;
}
out:
if (backing_map != NULL)
kunmap_atomic(backing_map, KM_USER0);
iounmap(gtt_mapping);
/* give syslog time to catch up */
msleep(1);
/* Directly flush the object, since we just loaded values with the CPU
* from the backing pages and we don't want to disturb the cache
* management that we're trying to observe.
*/
i915_gem_clflush_object(obj);
}
#endif

linux-core/i915_gem_proc.c (new file, 293 lines)

@ -0,0 +1,293 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "drm_compat.h"
#include "i915_drm.h"
#include "i915_drv.h"
static int i915_gem_active_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Active:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n",
obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Flushing:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Inactive:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_request_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Request:\n");
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
DRM_PROC_PRINT(" %d @ %d %08x\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies),
gem_request->flush_domains);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_interrupt_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Interrupt enable: %08x\n",
I915_READ(IER));
DRM_PROC_PRINT("Interrupt identity: %08x\n",
I915_READ(IIR));
DRM_PROC_PRINT("Interrupt mask: %08x\n",
I915_READ(IMR));
DRM_PROC_PRINT("Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
DRM_PROC_PRINT("Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
DRM_PROC_PRINT("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
DRM_PROC_PRINT("Current sequence: %d\n",
i915_get_gem_seqno(dev));
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static struct drm_proc_list {
/** file name */
const char *name;
/** proc callback*/
int (*f) (char *, char **, off_t, int, int *, void *);
} i915_gem_proc_list[] = {
{"i915_gem_active", i915_gem_active_info},
{"i915_gem_flushing", i915_gem_flushing_info},
{"i915_gem_inactive", i915_gem_inactive_info},
{"i915_gem_request", i915_gem_request_info},
{"i915_gem_seqno", i915_gem_seqno_info},
{"i915_gem_interrupt", i915_interrupt_info},
};
#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
int i915_gem_proc_init(struct drm_minor *minor)
{
struct proc_dir_entry *ent;
int i, j;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
ent = create_proc_entry(i915_gem_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/.../%s\n",
i915_gem_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(i915_gem_proc_list[i].name,
minor->dev_root);
return -1;
}
ent->read_proc = i915_gem_proc_list[i].f;
ent->data = minor;
}
return 0;
}
void i915_gem_proc_cleanup(struct drm_minor *minor)
{
int i;
if (!minor->dev_root)
return;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
}


@ -0,0 +1,309 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/** @file i915_gem_tiling.c
*
* Support for managing tiling state of buffer objects.
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvements from doing this on the back/depth buffer are on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3D driver what address
* swizzling it needs to do, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bits 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bits 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
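/*
* Editorial illustration (not part of this change): for the common 9/10
* mode described above, software doing CPU access to tiled pages would
* fold bits 9 and 10 into bit 6 of each byte address, e.g.:
*
*	static inline unsigned long swizzle_addr_9_10(unsigned long addr)
*	{
*		return addr ^ (((addr >> 3) ^ (addr >> 4)) & 64);
*	}
*
* i.e. bit 6 ^= bit 9 ^ bit 10; swizzle_addr_9_10 is a hypothetical
* helper, shown only to make the XOR scheme concrete.
*/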
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct pci_dev *bridge;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
int mchbar_offset;
char __iomem *mchbar;
int ret;
bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (bridge == NULL) {
DRM_ERROR("Couldn't get bridge device\n");
return;
}
ret = pci_enable_device(bridge);
if (ret != 0) {
DRM_ERROR("pci_enable_device failed: %d\n", ret);
return;
}
if (IS_I965G(dev))
mchbar_offset = 0x48;
else
mchbar_offset = 0x44;
/* Use resource 2 for our BAR that's stashed in a nonstandard location,
* since the bridge would only ever use standard BARs 0-1 (though it
* doesn't anyway)
*/
ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
if (ret != 0) {
DRM_ERROR("pci_read_base failed: %d\n", ret);
return;
}
mchbar = ioremap(pci_resource_start(bridge, 2),
pci_resource_len(bridge, 2));
if (mchbar == NULL) {
DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
return;
}
if (IS_I965G(dev) && !IS_I965GM(dev)) {
uint32_t chdecmisc;
/* On the 965, channel interleave appears to be determined by
* the flex bit. If flex is set, then the ranks (sides of a
* DIMM) of memory will be "stacked" (physical addresses walk
* through one rank then move on to the next, flipping channels
* or not depending on rank configuration). The GPU in this
* case does exactly the same addressing as the CPU.
*
* Unlike the 945, bit-17-based channel randomization does not
* appear to be available.
*
* XXX: While the G965 doesn't appear to do any interleaving
* when the DIMMs are not exactly matched, the G4x chipsets
* might be for "L-shaped" configurations, and will need to be
* detected.
*
* L-shaped configuration:
*
* +-----+
* | |
* |DIMM2| <-- non-interleaved
* +-----+
* +-----+ +-----+
* | | | |
* |DIMM0| |DIMM1| <-- interleaved area
* +-----+ +-----+
*/
chdecmisc = readb(mchbar + CHDECMISC);
if (chdecmisc == 0xff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
} else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
} else if (IS_I9XX(dev)) {
uint32_t dcc;
/* On 915-945 and GM965, channel interleave by the CPU is
* determined by DCC. The CPU will alternate based on bit 6
* in interleaved mode, and the GPU will then also alternate
* on bits 6, 9, and 10 for X, but the CPU may also optionally
* alternate based on bit 17 (XOR not disabled and XOR
* bit == 17).
*/
dcc = readl(mchbar + DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (IS_I915G(dev) || IS_I915GM(dev) ||
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_I965GM(dev)) {
/* GM965 only does bit 11-based channel
* randomization
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 or perhaps other swizzling */
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
break;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
iounmap(mchbar);
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode == I915_TILING_NONE) {
obj_priv->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
/* If we can't handle the swizzling, make it untiled. */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
}
}
obj_priv->tiling_mode = args->tiling_mode;
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return 0;
}
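/*
* Editorial illustration (not part of this change): userspace asks for X
* tiling and learns which swizzle it must apply for CPU access. A hedged
* sketch, assuming the DRM_IOCTL_I915_GEM_SET_TILING wrapper and only the
* argument fields referenced above:
*
*	struct drm_i915_gem_set_tiling st;
*
*	memset(&st, 0, sizeof(st));
*	st.handle = handle;
*	st.tiling_mode = I915_TILING_X;
*	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
*
* On return, st.swizzle_mode holds e.g. I915_BIT_6_SWIZZLE_9_10, or the
* kernel has fallen back to I915_TILING_NONE if it couldn't handle the
* swizzling.
*/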
/**
* Returns the current tiling mode and required bit 6 swizzling for the object.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj_priv->tiling_mode;
switch (obj_priv->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
default:
DRM_ERROR("unknown tiling mode\n");
}
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return 0;
}


@ -250,19 +250,20 @@ void opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
u32 pipeb_stats = I915_READ(PIPEBSTAT);
/* Some hardware uses the legacy backlight controller
to signal interrupts, so we need to set up pipe B
to generate an IRQ on writes */
I915_WRITE(PIPEBSTAT, pipeb_stats |=
I915_LEGACY_BLC_EVENT_ENABLE);
dev_priv->irq_enable_reg |=
(I915_ASLE_INTERRUPT
| I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
} else
dev_priv->irq_enable_reg |= I915_ASLE_INTERRUPT;
pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
I915_WRITE(PIPEBSTAT, pipeb_stats);
dev_priv->irq_mask_reg &=
~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
}
dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
asle->ardy = 1;


@ -959,6 +959,31 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
uint32_t pad;
};
struct drm_gem_flink {
/** Handle for the object being named */
uint32_t handle;
/** Returned global name */
uint32_t name;
};
struct drm_gem_open {
/** Name of object being opened */
uint32_t name;
/** Returned handle for the object */
uint32_t handle;
/** Returned size of the object */
uint64_t size;
};
/**
* \name Ioctls Definitions
*/
@ -978,7 +1003,11 @@ struct drm_mm_info_arg {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)


@ -41,10 +41,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
u32 acthd;
int i;
for (i = 0; i < 10000; i++) {
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
@ -54,13 +58,79 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->head != last_head)
i = 0;
if (acthd != last_acthd)
i = 0;
last_head = ring->head;
DRM_UDELAY(1);
last_acthd = acthd;
msleep_interruptible(10);
}
return -EBUSY;
}
int i915_init_hardware_status(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
}
void i915_free_hardware_status(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x02080, 0x1ffff000);
}
}
#if I915_RING_VALIDATE
/**
* Validate the cached ring tail value
*
* If the X server writes to the ring and DRM doesn't
* reload the head and tail pointers, it will end up writing
* data to the wrong place in the ring, causing havoc.
*/
void i915_ring_validate(struct drm_device *dev, const char *func, int line)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
u32 head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
if (tail != ring->tail) {
DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
func, line,
ring->head, head, ring->tail, tail);
BUG_ON(1);
}
}
#endif
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -80,7 +150,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev_priv->ring.virtual_start) {
@ -90,18 +160,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
dev_priv->ring.map.size = 0;
}
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x02080, 0x1ffff000);
}
if (I915_NEED_GFX_HWS(dev))
i915_free_hardware_status(dev);
return 0;
}
@ -182,14 +242,6 @@ static int i915_initialize(struct drm_device * dev,
return -EINVAL;
}
if (init->mmio_offset != 0)
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
#ifdef I915_HAVE_BUFFER
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif
@ -203,28 +255,28 @@ static int i915_initialize(struct drm_device * dev,
dev_priv->sarea_priv = NULL;
}
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (init->ring_size != 0) {
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap(&dev_priv->ring.map, dev);
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->cpp = init->cpp;
if (dev_priv->sarea_priv)
@ -233,9 +285,6 @@ static int i915_initialize(struct drm_device * dev,
/* We are using separate values as placeholders for mechanisms for
* private backbuffer/depthbuffer usage.
*/
dev_priv->use_mi_batchbuffer_start = 0;
if (IS_I965G(dev)) /* 965 doesn't support older method */
dev_priv->use_mi_batchbuffer_start = 1;
/* Allow hardware batchbuffers unless told otherwise.
*/
@ -245,24 +294,6 @@ static int i915_initialize(struct drm_device * dev,
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
/* Program Hardware Status Page */
if (!I915_NEED_GFX_HWS(dev)) {
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
mutex_init(&dev_priv->cmdbuf_mutex);
#endif
@ -291,11 +322,6 @@ static int i915_dma_resume(struct drm_device * dev)
return -EINVAL;
}
if (!dev_priv->mmio_map) {
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@ -459,9 +485,9 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
return 0;
}
static int i915_emit_box(struct drm_device * dev,
struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4)
int i915_emit_box(struct drm_device * dev,
struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box;
@ -517,7 +543,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(20);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
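/*
* Editorial note: index 5 here is the classic breadcrumb slot in the
* hardware status page, so the most recently emitted counter can be read
* back roughly as:
*
*	u32 breadcrumb = ((volatile u32 *)dev_priv->hw_status_page)[5];
*
* A sketch of the existing READ_BREADCRUMB() contract, assuming the status
* page set up by i915_init_hardware_status() above.
*/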
@ -610,7 +636,14 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
return ret;
}
if (dev_priv->use_mi_batchbuffer_start) {
if (IS_I830(dev) || IS_845G(dev)) {
BEGIN_LP_RING(4);
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
ADVANCE_LP_RING();
} else {
BEGIN_LP_RING(2);
if (IS_I965G(dev)) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@ -620,14 +653,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
ADVANCE_LP_RING();
} else {
BEGIN_LP_RING(4);
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
@ -715,9 +740,19 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
int i915_quiescent(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
i915_kernel_lost_context(dev);
return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
if (ret) {
i915_kernel_lost_context(dev);
DRM_ERROR("not quiescent head %08x tail %08x space %08x\n",
dev_priv->ring.head,
dev_priv->ring.tail,
dev_priv->ring.space);
}
return ret;
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@ -854,7 +889,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->irq ? 1 : 0;
value = dev->irq_enabled ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
@ -865,6 +900,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
return -EINVAL;
@ -891,8 +929,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
if (!IS_I965G(dev))
dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
@ -1026,6 +1062,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(drm_i915_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
/* Add register map (needed for suspend/resume) */
base = drm_get_resource_start(dev, mmio_bar);
@ -1034,6 +1071,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
i915_gem_load(dev);
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
@ -1043,6 +1082,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
#endif
#endif
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_hardware_status(dev);
if(ret)
return ret;
}
return ret;
}
@ -1050,8 +1096,9 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mmio_map)
drm_rmmap(dev, dev_priv->mmio_map);
i915_free_hardware_status(dev);
drm_rmmap(dev, dev_priv->mmio_map);
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
@ -1083,6 +1130,7 @@ void i915_driver_lastclose(struct drm_device * dev)
dev_priv->val_bufs = NULL;
}
#endif
i915_gem_lastclose(dev);
if (drm_getsarea(dev) && dev_priv->sarea_priv)
i915_do_cleanup_pageflip(dev);
@ -1106,12 +1154,38 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv;
DRM_DEBUG("\n");
i915_file_priv = (struct drm_i915_file_private *)
drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
if (!i915_file_priv)
return -ENOMEM;
file_priv->driver_priv = i915_file_priv;
i915_file_priv->mm.last_gem_seqno = 0;
i915_file_priv->mm.last_gem_throttle_seqno = 0;
return 0;
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@ -1134,6 +1208,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);


@ -176,6 +176,22 @@ typedef struct drm_i915_sarea {
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@ -195,6 +211,22 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
/* Asynchronous page flipping:
*/
@ -248,6 +280,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
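/* Editor's sketch (not part of this commit): userland can probe for GEM
 * support through the existing getparam ioctl before touching any of the
 * new interfaces.  Assumes <sys/ioctl.h> and an open DRM fd; 'has_gem' is
 * a made-up helper name. */
static int has_gem(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* older kernels reject the parameter */
	return value;
}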
typedef struct drm_i915_getparam {
int param;
@ -395,4 +428,292 @@ struct drm_i915_execbuffer {
struct drm_fence_arg fence_arg;
};
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
uint64_t gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
uint64_t gtt_end;
};
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
uint64_t size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
uint32_t handle;
uint32_t pad;
};
struct drm_i915_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to read from */
uint64_t offset;
/** Length of data to read */
uint64_t size;
/** Pointer to write the data into. */
uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
};
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to write to */
uint64_t offset;
/** Length of data to write */
uint64_t size;
/** Pointer to read the data from. */
uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
};
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
/** Offset in the object to map. */
uint64_t offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
uint64_t size;
/** Returned pointer the data was mapped at */
uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
};
struct drm_i915_gem_set_domain {
/** Handle for the object */
uint32_t handle;
/** New read domains */
uint32_t read_domains;
/** New write domain */
uint32_t write_domain;
};
struct drm_i915_gem_sw_finish {
/** Handle for the object */
uint32_t handle;
};
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
	 * It's appealing to make this an index into the exec_object
	 * list to refer to the buffer, but using a handle lets userland
	 * build a relocation list for a state buffer once and reuse it
	 * across execs instead of rewriting it each time.
*/
uint32_t target_handle;
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
uint32_t delta;
/** Offset in the buffer the relocation entry will be written into */
uint64_t offset;
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
uint64_t presumed_offset;
/**
* Target memory domains read by this operation.
*/
uint32_t read_domains;
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
uint32_t write_domain;
};
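/* Editor's sketch (not part of this commit): filling one relocation entry.
 * At exec time the kernel rewrites the dword at 'offset' inside the buffer
 * carrying this entry to the target's final GTT offset plus 'delta'.
 * Assumes <string.h> and <stdint.h>; 'fill_reloc' is a made-up helper. */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
		       uint32_t target_handle, uint64_t offset_in_buffer)
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target_handle;	/* buffer being pointed at */
	reloc->delta = 0;			/* point at the target's start */
	reloc->offset = offset_in_buffer;	/* where the pointer dword lives */
	reloc->presumed_offset = 0;		/* kernel writes the real one back */
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = 0;		/* read-only access */
}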
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
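/* Editor's sketch (not part of this commit): the usual domain handshake
 * around CPU access through a GEM mmap.  Assumes <sys/ioctl.h> and an open
 * DRM fd; helper names are made up. */
static void cpu_access_prepare(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain sd;

	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = I915_GEM_DOMAIN_CPU;
	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}

static void cpu_access_finish(int fd, uint32_t handle)
{
	struct drm_i915_gem_sw_finish sf;

	sf.handle = handle;	/* lets the kernel flush scanout, if needed */
	ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sf);
}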
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
uint32_t handle;
/** Number of relocations to be performed on this buffer */
uint32_t relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
uint64_t relocs_ptr;
/** Required alignment in graphics aperture */
uint64_t alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
uint64_t offset;
};
struct drm_i915_gem_execbuffer {
/**
	 * List of buffers to be validated, with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_exec_object.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
uint64_t buffers_ptr;
uint32_t buffer_count;
/** Offset in the batchbuffer to start execution from. */
uint32_t batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
uint32_t batch_len;
uint32_t DR1;
uint32_t DR4;
uint32_t num_cliprects;
uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
};
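/* Editor's sketch (not part of this commit): a minimal two-buffer
 * submission honoring the ordering rule above -- the relocation target
 * precedes the batch that points at it.  'relocs'/'nreloc' would be
 * entries built as in the sketch after the relocation struct; assumes
 * <string.h>, <stdint.h> and <sys/ioctl.h>. */
static int submit_batch(int fd, uint32_t target_handle, uint32_t batch_handle,
			struct drm_i915_gem_relocation_entry *relocs,
			uint32_t nreloc, uint32_t batch_bytes)
{
	struct drm_i915_gem_exec_object objs[2];
	struct drm_i915_gem_execbuffer execbuf;

	memset(objs, 0, sizeof(objs));
	objs[0].handle = target_handle;		/* referenced buffer first */
	objs[1].handle = batch_handle;
	objs[1].relocation_count = nreloc;
	objs[1].relocs_ptr = (uintptr_t)relocs;
	objs[1].alignment = 4096;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_bytes;
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}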
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
uint32_t handle;
uint32_t pad;
/** alignment required within the aperture */
uint64_t alignment;
/** Returned GTT offset of the buffer. */
uint64_t offset;
};
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
uint32_t handle;
uint32_t pad;
};
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
uint32_t handle;
/** Return busy status (1 if busy, 0 if idle) */
uint32_t busy;
};
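/* Editor's sketch (not part of this commit): a crude completion poll.
 * Real clients would block on an interrupt-driven wait rather than spin. */
static void wait_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	do {
		busy.handle = handle;
		if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
			break;			/* bad handle or wedged GPU */
	} while (busy.busy);
}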
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
uint32_t handle;
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
uint32_t tiling_mode;
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
uint32_t stride;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
uint32_t swizzle_mode;
};
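/* Editor's sketch (not part of this commit): request X tiling for a
 * 32bpp, 1024-pixel-wide surface and read back what the kernel chose. */
static void set_x_tiling(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_tiling st;

	st.handle = handle;
	st.tiling_mode = I915_TILING_X;
	st.stride = 1024 * 4;			/* bytes per row */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) == 0) {
		/* st.tiling_mode may have been demoted to I915_TILING_NONE;
		 * st.swizzle_mode tells CPU paths how mmap'ed data is laid
		 * out. */
	}
}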
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
uint32_t handle;
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
uint32_t tiling_mode;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
uint32_t swizzle_mode;
};
#endif /* _I915_DRM_H_ */


@ -37,7 +37,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20080312"
#define DRIVER_DATE "20080730"
#if defined(__linux__)
#define I915_HAVE_FENCE
@ -77,16 +77,23 @@ enum pipe {
struct drm_i915_validate_buffer;
#endif
#define WATCH_COHERENCY 0
#define WATCH_BUF 0
#define WATCH_EXEC 0
#define WATCH_LRU 0
#define WATCH_RELOC 0
#define WATCH_INACTIVE 0
#define WATCH_PWRITE 0
typedef struct _drm_i915_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;
struct mem_block {
@ -122,6 +129,8 @@ struct intel_opregion {
#endif
typedef struct drm_i915_private {
struct drm_device *dev;
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
@ -134,13 +143,12 @@ typedef struct drm_i915_private {
uint32_t counter;
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
unsigned int cpp;
int use_mi_batchbuffer_start;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
int tex_lru_log_granularity;
int allow_batchbuffer;
@ -150,7 +158,7 @@ typedef struct drm_i915_private {
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
uint32_t irq_enable_reg;
uint32_t irq_mask_reg;
int irq_enabled;
#ifdef I915_HAVE_FENCE
@ -267,8 +275,97 @@ typedef struct drm_i915_private {
u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
struct {
struct drm_mm gtt_space;
/**
* List of objects currently involved in rendering from the
* ringbuffer.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
* still have a write_domain which needs to be flushed before
* unbinding.
*
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
*/
struct list_head request_list;
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
uint32_t next_gem_seqno;
/**
* Waiting sequence number, if any
*/
uint32_t waiting_gem_seqno;
/**
* Last seq seen at irq time
*/
uint32_t irq_gem_seqno;
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int suspended;
/**
* Flag if the hardware appears to be wedged.
*
	 * This is set when attempts to idle the device time out.
	 * It prevents command submission from occurring and makes
* every pending request fail
*/
int wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
} mm;
} drm_i915_private_t;
struct drm_i915_file_private {
struct {
uint32_t last_gem_seqno;
uint32_t last_gem_throttle_seqno;
} mm;
};
enum intel_chip_family {
CHIP_I8XX = 0x01,
CHIP_I9XX = 0x02,
@ -276,6 +373,83 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object *obj;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
*/
int active;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
int dirty;
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
struct page **page_list;
/**
* Current offset of the object in GTT space.
*
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
/** Boolean whether this object has a valid gtt offset. */
int gtt_bound;
/** How many users have pinned this object in GTT space */
int pin_count;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
/** Current tiling mode for the object. */
uint32_t tiling_mode;
/**
* Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
* GEM_DOMAIN_CPU is not in the object's read domain.
*/
uint8_t *page_cpu_valid;
};
/**
* Request queue structure.
*
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
* By keeping this list, we can avoid having to do questionable
* sequence-number comparisons on buffer last_rendering_seqnos, and associate
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
/** Cache domains that were flushed at the start of the request. */
uint32_t flush_domains;
struct list_head list;
};
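/* Editor's note (not part of this commit): where a raw comparison is
 * unavoidable, the customary wraparound-safe form for a 32-bit seqno is
 * a signed difference: */
static inline int seqno_passed(uint32_t seq, uint32_t against)
{
	return (int32_t)(seq - against) >= 0;
}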
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
@ -284,8 +458,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern void i915_driver_lastclose(struct drm_device * dev);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@ -297,6 +474,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
int i915_emit_box(struct drm_device * dev,
struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4);
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@ -312,6 +493,7 @@ extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@ -360,9 +542,69 @@ void i915_flush_ttm(struct drm_ttm *ttm);
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_proc_init(struct drm_minor *minor);
void i915_gem_proc_cleanup(struct drm_minor *minor);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
#endif
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_INACTIVE
void i915_verify_inactive(struct drm_device *dev, char *file, int line);
#else
#define i915_verify_inactive(dev,file,line)
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
void i915_dump_lru(struct drm_device *dev, const char *where);
#ifdef __linux__
/* i915_opregion.c */
extern int intel_opregion_init(struct drm_device *dev);
@ -390,16 +632,25 @@ typedef boolean_t bool;
#endif
#define I915_VERBOSE 0
#define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
#if I915_RING_VALIDATE
void i915_ring_validate(struct drm_device *dev, const char *func, int line);
#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
#else
#define I915_RING_DO_VALIDATE(dev)
#endif
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", \
(n)); \
I915_RING_DO_VALIDATE(dev); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
@ -418,6 +669,7 @@ typedef boolean_t bool;
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I915_WRITE(PRB0_TAIL, outring); \
@ -425,6 +677,39 @@ typedef boolean_t bool;
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
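/* Editor's sketch (not part of this commit): how the macros pair up inside
 * a function with dev, dev_priv and RING_LOCALS in scope; OUT_RING, defined
 * alongside BEGIN/ADVANCE in this header, stores one dword. */
#if 0
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
#endif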
#define BREADCRUMB_BITS 31
#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
*
* The following dwords have a reserved meaning:
* 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
* 4: ring 0 head pointer
* 5: ring 1 head pointer (915-class)
* 6: ring 2 head pointer (915-class)
*
* The area from dword 0x10 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define I915_GEM_HWS_INDEX 0x10
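/* Editor's note (not part of this commit): with the GEM breadcrumb stored
 * in the driver-usage area, the current sequence number is a single
 * status-page read, e.g. READ_HWSP(dev_priv, I915_GEM_HWS_INDEX). */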
/* MCH MMIO space */
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
/** 965 MCH register controlling DRAM channel configuration */
#define CHDECMISC 0x111
#define CHDECMISC_FLEXMEMORY (1 << 1)
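/* Editor's sketch (not part of this commit), loosely modeled on what
 * i915_gem_detect_bit_6_swizzle() derives from these registers on
 * 915/945-class parts; the real function handles several more cases. */
static void sketch_detect_swizzle(struct drm_device *dev,
				  uint32_t *swizzle_x, uint32_t *swizzle_y)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t dcc = I915_READ(DCC);

	if ((dcc & DCC_ADDRESSING_MODE_MASK) ==
	    DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED &&
	    !(dcc & DCC_CHANNEL_XOR_DISABLE)) {
		/* interleaved dual channel with bit-6 XOR: CPU access
		 * through the aperture sees swizzled addresses */
		*swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		*swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else {
		*swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		*swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}
}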
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@ -525,33 +810,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define BREADCRUMB_BITS 31
#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
*
* The following dwords have a reserved meaning:
* 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
* 4: ring 0 head pointer
* 5: ring 1 head pointer (915-class)
* 6: ring 2 head pointer (915-class)
*
* The area from dword 0x10 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define I915_GEM_HWS_INDEX 0x10
/*
* 3D instructions used by the kernel
*/
@ -574,6 +839,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
@ -617,7 +883,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define IPEIR 0x02088
#define NOPID 0x02094
#define HWSTAM 0x02098
@ -647,6 +916,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
@ -696,7 +966,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
* GPIO regs
*/
#define GPIOA 0x5010
#define GPIOB 0x5014
#define GPIOC 0x5018


@ -33,6 +33,33 @@
#define MAX_NOPID ((u32)~0)
/*
* These are the interrupts used by the driver
*/
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
static inline void
i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
dev_priv->irq_mask_reg &= ~mask;
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
}
static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
dev_priv->irq_mask_reg |= mask;
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
}
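/* Editor's note (not part of this commit): as used later in this file,
 * both helpers run with dev_priv->user_irq_lock held, so the cached
 * irq_mask_reg and the IMR write stay consistent. */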
/**
 * i915_get_pipe - return the pipe associated with a given plane
* @dev: DRM device
@ -403,12 +430,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir;
u32 pipea_stats, pipeb_stats;
u32 pipea_stats = 0, pipeb_stats = 0;
int vblank = 0;
if (dev->pdev->msi_enabled)
I915_WRITE(IMR, ~0);
iir = I915_READ(IIR);
if (iir == 0)
#if 0
DRM_DEBUG("flag=%08x\n", iir);
#endif
atomic_inc(&dev_priv->irq_received);
if (iir == 0) {
if (dev->pdev->msi_enabled) {
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
return IRQ_NONE;
}
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
@ -422,7 +460,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 0));
}
I915_WRITE(PIPEASTAT, pipea_stats);
}
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
@ -462,9 +499,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
I915_WRITE(IIR, iir);
(void) I915_READ(IIR);
if (dev->pdev->msi_enabled)
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT) {
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
@ -501,35 +541,40 @@ int i915_emit_irq(struct drm_device *dev)
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
I915_WRITE(IER, dev_priv->irq_enable_reg);
}
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
// dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
// I915_WRITE(IER, dev_priv->irq_enable_reg);
}
BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr)
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
return 0;
}
i915_user_irq_on(dev_priv);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@ -594,16 +639,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@ -629,7 +675,9 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
I915_WRITE(IER, dev_priv->irq_enable_reg);
DRM_SPINLOCK(&dev_priv->user_irq_lock);
i915_enable_irq(dev_priv, mask_reg);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
@ -639,16 +687,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@ -656,7 +705,9 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
break;
}
I915_WRITE(IER, dev_priv->irq_enable_reg);
DRM_SPINLOCK(&dev_priv->user_irq_lock);
i915_disable_irq(dev_priv, mask_reg);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
if (pipestat_reg)
{
@ -669,14 +720,18 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
(void) I915_READ(pipestat_reg);
}
}
static void i915_enable_interrupt (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
dev_priv->irq_mask_reg = ~0;
I915_WRITE(IMR, dev_priv->irq_mask_reg);
I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
(void) I915_READ (IER);
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
@ -684,7 +739,6 @@ static void i915_enable_interrupt (struct drm_device *dev)
#endif
#endif
I915_WRITE(IER, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
@ -900,7 +954,7 @@ int i915_driver_irq_postinstall(struct drm_device * dev)
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
dev_priv->irq_enable_reg = 0;
dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)


@ -22,7 +22,10 @@ TESTS = auth \
getstats \
lock \
setversion \
updatedraw
updatedraw \
gem_basic \
gem_readwrite \
gem_mmap
EXTRA_PROGRAMS = $(TESTS)
CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES)


@ -26,6 +26,7 @@
*/
#include <fcntl.h>
#include <sys/stat.h>
#include "drmtest.h"
/** Open the first DRM device we can find, searching up to 16 device nodes */
@ -80,4 +81,3 @@ int drm_open_any_master(void)
fprintf(stderr, "Couldn't find an un-controlled DRM device\n");
abort();
}

tests/gem_basic.c Normal file

@ -0,0 +1,98 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
static void
test_bad_close(int fd)
{
struct drm_gem_close close;
int ret;
printf("Testing error return on bad close ioctl.\n");
close.handle = 0x10101010;
ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
assert(ret == -1 && errno == EINVAL);
}
static void
test_create_close(int fd)
{
struct drm_i915_gem_create create;
struct drm_gem_close close;
int ret;
printf("Testing creating and closing an object.\n");
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
close.handle = create.handle;
ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
assert(ret == 0);
}
static void
test_create_fd_close(int fd)
{
struct drm_i915_gem_create create;
int ret;
printf("Testing closing with an object allocated.\n");
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
close(fd);
}
int main(int argc, char **argv)
{
int fd;
fd = drm_open_any();
test_bad_close(fd);
test_create_close(fd);
test_create_fd_close(fd);
return 0;
}

tests/gem_mmap.c Normal file

@ -0,0 +1,132 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
#define OBJECT_SIZE 16384
int do_read(int fd, int handle, void *buf, int offset, int size)
{
struct drm_i915_gem_pread read;
/* Ensure that we don't have any convenient data in buf in case
* we fail.
*/
memset(buf, 0xd0, size);
memset(&read, 0, sizeof(read));
read.handle = handle;
read.data_ptr = (uintptr_t)buf;
read.size = size;
read.offset = offset;
return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}
int do_write(int fd, int handle, void *buf, int offset, int size)
{
struct drm_i915_gem_pwrite write;
memset(&write, 0, sizeof(write));
write.handle = handle;
write.data_ptr = (uintptr_t)buf;
write.size = size;
write.offset = offset;
return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}
int main(int argc, char **argv)
{
int fd;
struct drm_i915_gem_create create;
struct drm_i915_gem_mmap mmap;
struct drm_gem_close unref;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
uint8_t *addr;
int ret;
int handle;
fd = drm_open_any();
memset(&mmap, 0, sizeof(mmap));
mmap.handle = 0x10101010;
mmap.offset = 0;
mmap.size = 4096;
printf("Testing mmaping of bad object.\n");
ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
assert(ret == -1 && errno == EBADF);
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
handle = create.handle;
printf("Testing mmaping of newly created object.\n");
mmap.handle = handle;
mmap.offset = 0;
mmap.size = OBJECT_SIZE;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
assert(ret == 0);
addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;
printf("Testing contents of newly created object.\n");
memset(expected, 0, sizeof(expected));
assert(memcmp(addr, expected, sizeof(expected)) == 0);
printf("Testing coherency of writes and mmap reads.\n");
memset(buf, 0, sizeof(buf));
memset(buf + 1024, 0x01, 1024);
memset(expected + 1024, 0x01, 1024);
ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
assert(ret == 0);
assert(memcmp(buf, addr, sizeof(buf)) == 0);
printf("Testing that mapping stays after close\n");
unref.handle = handle;
ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &unref);
assert(ret == 0);
assert(memcmp(buf, addr, sizeof(buf)) == 0);
printf("Testing unmapping\n");
munmap(addr, OBJECT_SIZE);
close(fd);
return 0;
}

tests/gem_readwrite.c Normal file

@ -0,0 +1,135 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
#define OBJECT_SIZE 16384
int do_read(int fd, int handle, void *buf, int offset, int size)
{
struct drm_i915_gem_pread read;
/* Ensure that we don't have any convenient data in buf in case
* we fail.
*/
memset(buf, 0xd0, size);
memset(&read, 0, sizeof(read));
read.handle = handle;
read.data_ptr = (uintptr_t)buf;
read.size = size;
read.offset = offset;
return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}
int do_write(int fd, int handle, void *buf, int offset, int size)
{
struct drm_i915_gem_pwrite write;
memset(&write, 0, sizeof(write));
write.handle = handle;
write.data_ptr = (uintptr_t)buf;
write.size = size;
write.offset = offset;
return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}
int main(int argc, char **argv)
{
int fd;
struct drm_i915_gem_create create;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
int ret;
int handle;
fd = drm_open_any();
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
handle = create.handle;
printf("Testing contents of newly created object.\n");
ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
assert(ret == 0);
memset(&expected, 0, sizeof(expected));
assert(memcmp(expected, buf, sizeof(expected)) == 0);
printf("Testing read beyond end of buffer.\n");
ret = do_read(fd, handle, buf, OBJECT_SIZE / 2, OBJECT_SIZE);
printf("%d %d\n", ret, errno);
assert(ret == -1 && errno == EINVAL);
printf("Testing full write of buffer\n");
memset(buf, 0, sizeof(buf));
memset(buf + 1024, 0x01, 1024);
memset(expected + 1024, 0x01, 1024);
ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
assert(ret == 0);
ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
assert(ret == 0);
assert(memcmp(buf, expected, sizeof(buf)) == 0);
printf("Testing partial write of buffer\n");
memset(buf + 4096, 0x02, 1024);
memset(expected + 4096, 0x02, 1024);
ret = do_write(fd, handle, buf + 4096, 4096, 1024);
assert(ret == 0);
ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
assert(ret == 0);
assert(memcmp(buf, expected, sizeof(buf)) == 0);
printf("Testing partial read of buffer\n");
ret = do_read(fd, handle, buf, 512, 1024);
assert(ret == 0);
assert(memcmp(buf, expected + 512, 1024) == 0);
printf("Testing read of bad buffer handle\n");
ret = do_read(fd, 1234, buf, 0, 1024);
assert(ret == -1 && errno == EBADF);
printf("Testing write of bad buffer handle\n");
ret = do_write(fd, 1234, buf, 0, 1024);
assert(ret == -1 && errno == EBADF);
close(fd);
return 0;
}