intel-gem: Move debug-only functions to a separate file.

parent 6d258ddf77
commit 04ae66db1c

@@ -21,7 +21,7 @@ mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs := i810_drv.o i810_dma.o
 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
 		i915_buffer.o i915_compat.o i915_execbuf.o \
-		i915_gem.o i915_gem_proc.o i915_gem_tiling.o
+		i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
 		nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
@@ -32,20 +32,6 @@
 #include "i915_drv.h"
 #include <linux/swap.h>
 
-#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
-#define WATCH_EXEC 0
-#define WATCH_LRU 0
-#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
-#define WATCH_PWRITE 0
-
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
-		     const char *where, uint32_t mark);
-#endif
-
 static int
 i915_gem_object_set_domain(struct drm_gem_object *obj,
 			   uint32_t read_domains,
@@ -59,9 +45,6 @@ static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 
-static void
-i915_gem_clflush_object(struct drm_gem_object *obj);
-
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
@@ -541,25 +524,6 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 		       &dev_priv->mm.active_list);
 }
 
-#if WATCH_INACTIVE
-static void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = obj_priv->obj;
-		if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)))
-			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
-				  obj,
-				  obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line);
-	}
-}
-#else
-#define i915_verify_inactive(dev,file,line)
-#endif
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -1006,83 +970,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return 0;
 }
 
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
-		   uint32_t bias, uint32_t mark)
-{
-	uint32_t *mem = kmap_atomic(page, KM_USER0);
-	int i;
-	for (i = start; i < end; i += 4)
-		DRM_INFO("%08x: %08x%s\n",
-			 (int) (bias + i), mem[i / 4],
-			 (bias + i == mark) ? " ********" : "");
-	kunmap_atomic(mem, KM_USER0);
-	/* give syslog time to catch up */
-	msleep(1);
-}
-
-static void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
-		     const char *where, uint32_t mark)
-{
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int page;
-
-	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
-	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
-		int page_len, chunk, chunk_len;
-
-		page_len = len - page * PAGE_SIZE;
-		if (page_len > PAGE_SIZE)
-			page_len = PAGE_SIZE;
-
-		for (chunk = 0; chunk < page_len; chunk += 128) {
-			chunk_len = page_len - chunk;
-			if (chunk_len > 128)
-				chunk_len = 128;
-			i915_gem_dump_page(obj_priv->page_list[page],
-					   chunk, chunk + chunk_len,
-					   obj_priv->gtt_offset +
-					   page * PAGE_SIZE,
-					   mark);
-		}
-	}
-}
-#endif
-
-#if WATCH_LRU
-static void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-
-	DRM_INFO("active list %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-	DRM_INFO("flushing list %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-	DRM_INFO("inactive %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-}
-#endif
-
 static int
 i915_gem_evict_something(struct drm_device *dev)
 {
@@ -1308,7 +1195,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	return 0;
 }
 
-static void
+void
 i915_gem_clflush_object(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1540,76 +1427,6 @@ i915_gem_dev_set_domain(struct drm_device *dev)
 	return flush_domains;
 }
 
-#if WATCH_COHERENCY
-static void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int page;
-	uint32_t *gtt_mapping;
-	uint32_t *backing_map = NULL;
-	int bad_count = 0;
-
-	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
-		 __func__, obj, obj_priv->gtt_offset, handle,
-		 obj->size / 1024);
-
-	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
-			      obj->size);
-	if (gtt_mapping == NULL) {
-		DRM_ERROR("failed to map GTT space\n");
-		return;
-	}
-
-	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
-		int i;
-
-		backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
-
-		if (backing_map == NULL) {
-			DRM_ERROR("failed to map backing page\n");
-			goto out;
-		}
-
-		for (i = 0; i < PAGE_SIZE / 4; i++) {
-			uint32_t cpuval = backing_map[i];
-			uint32_t gttval = readl(gtt_mapping +
-						page * 1024 + i);
-
-			if (cpuval != gttval) {
-				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
-					 "0x%08x vs 0x%08x\n",
-					 (int)(obj_priv->gtt_offset +
-					       page * PAGE_SIZE + i * 4),
-					 cpuval, gttval);
-				if (bad_count++ >= 8) {
-					DRM_INFO("...\n");
-					goto out;
-				}
-			}
-		}
-		kunmap_atomic(backing_map, KM_USER0);
-		backing_map = NULL;
-	}
-
- out:
-	if (backing_map != NULL)
-		kunmap_atomic(backing_map, KM_USER0);
-	iounmap(gtt_mapping);
-
-	/* give syslog time to catch up */
-	msleep(1);
-
-	/* Directly flush the object, since we just loaded values with the CPU
-	 * from the backing pages and we don't want to disturb the cache
-	 * management that we're trying to observe.
-	 */
-
-	i915_gem_clflush_object(obj);
-}
-#endif
-
 /**
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
@@ -72,6 +72,14 @@
 struct drm_i915_validate_buffer;
 #endif
 
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
 typedef struct _drm_i915_ring_buffer {
 	int tail_mask;
 	unsigned long Size;
@@ -541,11 +549,25 @@ void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
 #endif
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev,file,line)
+#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+void i915_dump_lru(struct drm_device *dev, const char *where);
+
 #ifdef __linux__
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 extern void intel_init_chipset_flush_compat(struct drm_device *dev);