intel: Fix up math errors when allocating very large BOs.

The logbase2 would overflow and wrap the size around to 0, making the code
allocate a 4kb object instead.  By simplifying the code to just walk the
14-entry bucket array comparing sizes instead of indexing on
ffs(1 << logbase2(size)), we avoid silly math errors and have code of
approximately the same speed.
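
For illustration only (not part of the commit): a minimal standalone C sketch of how the old rounding, bo_size = 1 << logbase2(size), collapses once a request no longer fits in a signed int. The 3GB request size and the 32-bit int are assumptions for the example; logbase2() below is the function this patch removes.

#include <stdio.h>

static int
logbase2(int n)                 /* the helper removed by this patch */
{
    int i = 1;
    int log2 = 0;

    while (n > i) {
        i *= 2;
        log2++;
    }

    return log2;
}

int
main(void)
{
    unsigned long size = 3UL * 1024 * 1024 * 1024; /* example: 3GB request */
    unsigned long page_size = 4096;
    unsigned long bo_size;

    /* On typical systems the unsigned long is converted to a negative int,
     * so logbase2() returns 0, the "rounded" size becomes 1 byte, and the
     * page_size check below bumps it to a single 4KB page. */
    bo_size = 1 << logbase2(size);
    if (bo_size < page_size)
        bo_size = page_size;

    printf("requested %lu bytes, old code would allocate %lu bytes\n",
           size, bo_size);
    return 0;
}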

Many thanks to Simon Farnsworth for debugging and providing a working patch.
Bug #27365.
Eric Anholt 2009-07-06 11:55:28 -07:00
parent cea2d29ee4
commit 78fa590a1d
1 changed file with 15 additions and 31 deletions


@@ -78,6 +78,7 @@ struct drm_intel_gem_bo_bucket {
      */
     int max_entries;
     int num_entries;
+    unsigned long size;
 };
 
 /* Only cache objects up to 64MB. Bigger than that, and the rounding of the
@@ -203,39 +204,20 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
 static void
 drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
-static int
-logbase2(int n)
-{
-    int i = 1;
-    int log2 = 0;
-    while (n > i) {
-        i *= 2;
-        log2++;
-    }
-    return log2;
-}
-
 static struct drm_intel_gem_bo_bucket *
 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                  unsigned long size)
 {
     int i;
 
-    /* We only do buckets in power of two increments */
-    if ((size & (size - 1)) != 0)
-        return NULL;
+    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+        struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+        if (bucket->size >= size) {
+            return bucket;
+        }
+    }
 
-    /* We should only see sizes rounded to pages. */
-    assert((size % 4096) == 0);
-
-    /* We always allocate in units of pages */
-    i = ffs(size / 4096) - 1;
-    if (i >= DRM_INTEL_GEM_BO_BUCKETS)
-        return NULL;
-
-    return &bufmgr_gem->cache_bucket[i];
+    return NULL;
 }
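
The replacement lookup relies on the buckets being initialized smallest to largest (see the final hunk), so the first bucket whose size is at least the requested size is the right one, and a request larger than every bucket falls out the bottom and returns NULL. As the commit message notes, scanning the 14-entry array costs about the same as the old ffs()-based indexing, and it no longer requires the caller to pre-round the size to a power of two.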
@@ -345,10 +327,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
     unsigned long bo_size;
 
-    /* Round the allocated size up to a power of two number of pages. */
-    bo_size = 1 << logbase2(size);
-    if (bo_size < page_size)
-        bo_size = page_size;
-
-    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
+    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
 
     /* If we don't have caching at this size, don't actually round the
      * allocation up.
@@ -357,6 +336,8 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
         bo_size = size;
         if (bo_size < page_size)
             bo_size = page_size;
+    } else {
+        bo_size = bucket->size;
     }
 
     pthread_mutex_lock(&bufmgr_gem->lock);
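
Net effect in the allocator: when a bucket matches, the BO is rounded up to that bucket's size (for instance, a hypothetical 5MB request would be served at the 8MB bucket's size), and when no bucket is large enough the request keeps its exact page-aligned size and simply bypasses the reuse cache, instead of being mangled by the old power-of-two math.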
@@ -1412,6 +1393,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
     struct drm_i915_gem_get_aperture aperture;
     drm_i915_getparam_t gp;
     int ret, i;
+    unsigned long size;
 
     bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
     bufmgr_gem->fd = fd;
@@ -1483,8 +1465,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
     bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
     bufmgr_gem->bufmgr.get_pipe_from_crtc_id = drm_intel_gem_get_pipe_from_crtc_id;
 
     /* Initialize the linked lists for BO reuse cache. */
-    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
+    for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
         DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
+        bufmgr_gem->cache_bucket[i].size = size;
+    }
 
     return &bufmgr_gem->bufmgr;
 }
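
For reference, a small standalone sketch (not part of the patch) of the bucket ladder the init loop above builds and of the walk the new lookup performs. It assumes DRM_INTEL_GEM_BO_BUCKETS is the 14-entry array the commit message mentions, and the 5MB request is an arbitrary example.

#include <stdio.h>

#define NUM_BUCKETS 14          /* stand-in for DRM_INTEL_GEM_BO_BUCKETS */

int
main(void)
{
    unsigned long size;
    unsigned long request = 5 * 1024 * 1024;   /* example 5MB request */
    int i;

    /* Sizes double from 4096, so bucket i caches BOs of 4096 << i bytes,
     * i.e. 4KB up to 32MB with 14 buckets. */
    for (i = 0, size = 4096; i < NUM_BUCKETS; i++, size *= 2)
        printf("bucket %2d: %lu bytes\n", i, size);

    /* Same walk the new drm_intel_gem_bo_bucket_for_size() does:
     * the first bucket at least as large as the request wins. */
    for (i = 0, size = 4096; i < NUM_BUCKETS; i++, size *= 2) {
        if (size >= request) {
            printf("%lu-byte request -> %lu-byte bucket\n", request, size);
            break;
        }
    }
    return 0;
}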