intel: Mark cached bo as purgeable
Set the DONTNEED flag on cached buffers so that the kernel is free to discard those when under memory pressure. [anholt: This takes firefox-talos-gfx time from ~62 seconds to ~65 seconds on my GM965, but it seems like a hit worth taking for the improved functionality from saving memory] Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Eric Anholt <eric@anholt.net>
parent
12d9b7cc85
commit
0fb215ae31
|
@ -191,6 +191,9 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
|
|||
static void
|
||||
drm_intel_gem_bo_unreference(drm_intel_bo *bo);
|
||||
|
||||
static void
|
||||
drm_intel_gem_bo_free(drm_intel_bo *bo);
|
||||
|
||||
static struct drm_intel_gem_bo_bucket *
|
||||
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
|
||||
unsigned long size)
|
||||
|
@ -315,6 +318,38 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
|
|||
return (ret == 0 && busy.busy);
|
||||
}
|
||||
|
||||
static int
|
||||
drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
|
||||
drm_intel_bo_gem *bo_gem,
|
||||
int state)
|
||||
{
|
||||
struct drm_i915_gem_madvise madv;
|
||||
|
||||
madv.handle = bo_gem->gem_handle;
|
||||
madv.madv = state;
|
||||
madv.retained = 1;
|
||||
ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
|
||||
|
||||
return madv.retained;
|
||||
}
|
||||
|
||||
/* drop the oldest entries that have been purged by the kernel */
|
||||
static void
|
||||
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
|
||||
struct drm_intel_gem_bo_bucket *bucket)
|
||||
{
|
||||
while (!DRMLISTEMPTY(&bucket->head)) {
|
||||
drm_intel_bo_gem *bo_gem;
|
||||
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
|
||||
if (drm_intel_gem_bo_madvise (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
|
||||
break;
|
||||
|
||||
DRMLISTDEL(&bo_gem->head);
|
||||
drm_intel_gem_bo_free(&bo_gem->bo);
|
||||
}
|
||||
}
|
||||
|
||||
static drm_intel_bo *
|
||||
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
|
||||
unsigned long size, unsigned int alignment,
|
||||
|
@ -325,7 +360,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
unsigned int page_size = getpagesize();
|
||||
int ret;
|
||||
struct drm_intel_gem_bo_bucket *bucket;
|
||||
int alloc_from_cache = 0;
|
||||
int alloc_from_cache;
|
||||
unsigned long bo_size;
|
||||
|
||||
/* Round the allocated size up to a power of two number of pages. */
|
||||
|
@ -344,6 +379,8 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
|
||||
pthread_mutex_lock(&bufmgr_gem->lock);
|
||||
/* Get a buffer out of the cache if available */
|
||||
retry:
|
||||
alloc_from_cache = 0;
|
||||
if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
|
||||
if (for_render) {
|
||||
/* Allocate new render-target BOs from the tail (MRU)
|
||||
|
@ -361,12 +398,19 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
|
|||
* waiting for the GPU to finish.
|
||||
*/
|
||||
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
|
||||
|
||||
if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
|
||||
alloc_from_cache = 1;
|
||||
DRMLISTDEL(&bo_gem->head);
|
||||
}
|
||||
}
|
||||
|
||||
if (alloc_from_cache) {
|
||||
if(!drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
|
||||
drm_intel_gem_bo_free(&bo_gem->bo);
|
||||
drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem, bucket);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&bufmgr_gem->lock);
|
||||
|
||||
|
@ -591,6 +635,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
|
|||
|
||||
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
|
||||
|
||||
drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
|
||||
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
|
||||
} else {
|
||||
drm_intel_gem_bo_free(bo);
|
||||
|
|
|
@ -206,6 +206,7 @@ typedef struct drm_i915_sarea {
|
|||
#define DRM_I915_GEM_GET_APERTURE 0x23
|
||||
#define DRM_I915_GEM_MMAP_GTT 0x24
|
||||
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
|
||||
#define DRM_I915_GEM_MADVISE 0x26
|
||||
|
||||
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
|
||||
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
|
||||
|
@ -244,6 +245,7 @@ typedef struct drm_i915_sarea {
|
|||
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
|
||||
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
|
||||
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
|
||||
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
|
||||
|
||||
/* Asynchronous page flipping:
|
||||
*/
|
||||
|
@ -727,4 +729,18 @@ struct drm_i915_get_pipe_from_crtc_id {
|
|||
uint32_t pipe;
|
||||
};
|
||||
|
||||
#define I915_MADV_WILLNEED 0	/* pages are needed; do not discard */
#define I915_MADV_DONTNEED 1	/* pages may be discarded under pressure */

/* Argument block for DRM_IOCTL_I915_GEM_MADVISE: tells the kernel
 * whether a gem object's backing pages may be discarded under memory
 * pressure, and reports back whether the backing store still exists.
 */
struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice. */
	uint32_t handle;

	/** Advice. */
	uint32_t madv;

	/** Whether or not the backing store still exists */
	uint32_t retained;
};
|
||||
|
||||
#endif /* _I915_DRM_H_ */
|
||||
|
|
Loading…
Reference in New Issue