intel: Remove the mapped flag, which is adequately covered by bo_gem->virtual.

main
Eric Anholt 2008-12-14 14:32:09 -08:00
parent 9583c099b4
commit 782316801b
1 changed file with 53 additions and 67 deletions

View File

@@ -106,7 +106,6 @@ struct _drm_intel_bo_gem {
int refcount;
/** Boolean whether the mmap ioctl has been called for this buffer yet. */
int mapped;
uint32_t gem_handle;
const char *name;
@@ -134,7 +133,7 @@ struct _drm_intel_bo_gem {
drm_intel_bo **reloc_target_bo;
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer */
/** Mapped address for the buffer, saved across map/unmap cycles */
void *virtual;
/** free list */
@@ -441,7 +440,7 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
struct drm_gem_close close;
int ret;
if (bo_gem->mapped)
if (bo_gem->virtual)
munmap (bo_gem->virtual, bo_gem->bo.size);
/* Close this object */
@@ -523,15 +522,11 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
if (!bo_gem->mapped) {
assert(bo->virtual == NULL);
if (!bo_gem->virtual) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
if (bo_gem->virtual == NULL) {
struct drm_i915_gem_mmap mmap_arg;
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
@@ -543,12 +538,11 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
}
bo->virtual = bo_gem->virtual;
bo_gem->swrast = 0;
bo_gem->mapped = 1;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->virtual);
bo->virtual = bo_gem->virtual;
if (!bo_gem->swrast) {
set_domain.handle = bo_gem->gem_handle;
@@ -583,24 +577,17 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
pthread_mutex_lock(&bufmgr_gem->lock);
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
if (!bo_gem->mapped) {
assert(bo->virtual == NULL);
DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
/* Get a mapping of the buffer if we haven't before. */
if (bo_gem->virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
/* Get the fake offset back... */
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
&mmap_arg);
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
if (ret != 0) {
fprintf(stderr,
"%s:%d: Error preparing buffer map %d (%s): %s .\n",
@@ -627,10 +614,9 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
bo->virtual = bo_gem->virtual;
bo_gem->mapped = 1;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->virtual);
}
/* Now move it to the GTT domain so that the CPU caches are flushed */
set_domain.handle = bo_gem->gem_handle;
@@ -662,7 +648,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
if (bo == NULL)
return 0;
assert(bo_gem->mapped);
assert(bo_gem->virtual != NULL);
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->swrast) {