libdrm: add support for i915 GTT mapping ioctl

Add a drm_intel_gem_bo_map_gtt() function for mapping a buffer object
through the aperture rather than directly to its CPU cacheable memory.
Jesse Barnes 2008-11-13 13:52:04 -08:00
parent 930c0e7cf4
commit 276c07d885
4 changed files with 104 additions and 1 deletion
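For context (not part of the commit), here is a minimal usage sketch of the new entry point. It assumes the existing intel_bufmgr API (drm_intel_bufmgr_gem_init, drm_intel_bo_alloc, drm_intel_bo_unmap, drm_intel_bo_unreference), assumes /dev/dri/card0 is the right device node, pairs the GTT map with the regular unmap path (no GTT-specific unmap is added here), and elides error handling; the function name is illustrative only.

/* Hypothetical example, not from the commit. */
#include <fcntl.h>
#include <string.h>
#include "intel_bufmgr.h"

int write_through_gtt(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "gtt example", 4096, 4096);

	/* Map through the aperture; bo->virtual then points at the GTT view. */
	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
		memset(bo->virtual, 0, bo->size);
		drm_intel_bo_unmap(bo);	/* regular unmap pairs with the GTT map */
	}

	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
	return 0;
}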

intel_bufmgr.h

@@ -47,6 +47,13 @@ struct _drm_intel_bo {
	 * allocation, such as being aligned to page size.
	 */
	unsigned long size;

	/**
	 * Alignment requirement for object
	 *
	 * Used for GTT mapping & pinning the object.
	 */
	unsigned long align;
	/**
	 * Card virtual address (offset from the beginning of the aperture) for the
	 * object. Only valid while validated.
@@ -98,6 +105,7 @@ drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
/* drm_intel_bufmgr_fake.c */
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,

intel_bufmgr_gem.c

@@ -39,6 +39,7 @@
#endif
#include <xf86drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -47,6 +48,8 @@
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "errno.h"
#include "intel_bufmgr.h"
@@ -569,6 +572,84 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
	return 0;
}

int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Allow recursive mapping. Mesa may recursively map buffers with
	 * nested display loops.
	 */
	if (!bo_gem->mapped) {
		assert(bo->virtual == NULL);

		DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

		if (bo_gem->virtual == NULL) {
			struct drm_i915_gem_mmap_gtt mmap_arg;

			memset(&mmap_arg, 0, sizeof(mmap_arg));
			mmap_arg.handle = bo_gem->gem_handle;

			/* Get the fake offset back... */
			ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
				    &mmap_arg);
			if (ret != 0) {
				fprintf(stderr,
					"%s:%d: Error preparing buffer map %d (%s): %s .\n",
					__FILE__, __LINE__,
					bo_gem->gem_handle, bo_gem->name,
					strerror(errno));
				pthread_mutex_unlock(&bufmgr_gem->lock);
				return ret;
			}

			/* and mmap it */
			bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					       MAP_SHARED, bufmgr_gem->fd,
					       mmap_arg.offset);
			if (bo_gem->virtual == MAP_FAILED) {
				fprintf(stderr,
					"%s:%d: Error mapping buffer %d (%s): %s .\n",
					__FILE__, __LINE__,
					bo_gem->gem_handle, bo_gem->name,
					strerror(errno));
				pthread_mutex_unlock(&bufmgr_gem->lock);
				return errno;
			}
		}

		bo->virtual = bo_gem->virtual;
		bo_gem->mapped = 1;
		DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
		    bo_gem->virtual);
	}

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
			 __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{

drm.h

@@ -236,7 +236,7 @@ enum drm_map_type {
	_DRM_AGP = 3, /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
-	_DRM_TTM = 6
+	_DRM_GEM = 6
};
/**

i915_drm.h

@@ -193,6 +193,7 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -224,6 +225,7 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -499,6 +501,18 @@ struct drm_i915_gem_mmap {
	uint64_t addr_ptr;	/* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	uint32_t handle;
	uint32_t pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	uint64_t offset;
};
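The struct above is one half of a two-step protocol: the ioctl fills in a fake offset, and a subsequent mmap of the DRM fd at that offset yields a GTT-backed view of the object. The following rough sketch (not part of this header or the commit) mirrors what drm_intel_gem_bo_map_gtt() does in the library code above; the helper name is hypothetical, the handle is assumed to come from an earlier GEM create, and the include path is assumed.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "i915_drm.h"

/* Hypothetical helper: map a GEM object through the GTT via the new ioctl. */
void *gtt_map_raw(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	void *ptr;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = handle;

	/* Step 1: ask the kernel for the fake offset for this object. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) != 0)
		return NULL;

	/* Step 2: mmap the DRM fd at that offset; faults are serviced
	 * through the aperture rather than CPU-cacheable memory.
	 */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, mmap_arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}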
struct drm_i915_gem_set_domain {
	/** Handle for the object */
	uint32_t handle;