Merged drmfntbl-0-0-1

parent 93e8c201af
commit 5c9ed83094
@@ -100,19 +100,6 @@ typedef struct drm_file drm_file_t;

#define DRM_MAX_CTXBITMAP	(PAGE_SIZE * 8)

/* Mapping helper macros */
#define DRM_IOREMAP(map, dev)						\
	(map)->handle = DRM(ioremap)( dev, map )

#define DRM_IOREMAP_NOCACHE(map, dev)					\
	(map)->handle = DRM(ioremap_nocache)( dev, map )

#define DRM_IOREMAPFREE(map, dev)					\
	do {								\
		if ( (map)->handle && (map)->size )			\
			DRM(ioremapfree)( map );			\
	} while (0)

/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
#define DRM_MIN(a,b) ((a)<(b)?(a):(b))
@@ -302,6 +289,32 @@ typedef struct drm_vbl_sig {
	int		pid;
} drm_vbl_sig_t;

/**
 * DRM device functions structure
 */
struct drm_device;

struct drm_driver_fn {
	int (*preinit)(struct drm_device *, unsigned long flags);
	int (*postinit)(struct drm_device *, unsigned long flags);
	void (*prerelease)(struct drm_device *, void *filp);
	void (*pretakedown)(struct drm_device *);
	int (*postcleanup)(struct drm_device *);
	int (*presetup)(struct drm_device *);
	int (*postsetup)(struct drm_device *);
	void (*open_helper)(struct drm_device *, drm_file_t *);
	void (*release)(struct drm_device *, void *filp);
	void (*dma_ready)(struct drm_device *);
	int (*dma_quiescent)(struct drm_device *);
	int (*dma_flush_block_and_flush)(struct drm_device *, int context, drm_lock_flags_t flags);
	int (*dma_flush_unblock)(struct drm_device *, int context, drm_lock_flags_t flags);
	int (*context_ctor)(struct drm_device *dev, int context);
	int (*context_dtor)(struct drm_device *dev, int context);
	int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
	int (*kernel_context_switch_unlock)(struct drm_device *dev);
	int (*dma_schedule)(struct drm_device *dev, int locked);
};

struct drm_device {
#ifdef __NetBSD__
	struct device	device;	/* NetBSD's softc is an extension of struct device */

@@ -393,8 +406,13 @@ struct drm_device {
	drm_sg_mem_t	*sg;	/* Scatter gather memory */
	atomic_t	*ctx_bitmap;
	void		*dev_private;
	struct drm_driver_fn fn_tbl;
	drm_local_map_t	*agp_buffer_map;
	int		dev_priv_size;
};

extern void DRM(driver_register_fns)(struct drm_device *dev);

extern int	DRM(flags);

/* Memory management support (drm_memory.h) */
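To make the intent of the new table concrete, here is a minimal sketch of a driver-side DRM(driver_register_fns) filling in fn_tbl. It is not part of this diff; the mydrv_* names are hypothetical.

/* Minimal sketch (assumed, not from this diff): a driver registers only the
 * hooks it needs; any pointer left NULL is simply skipped by the core. */
static int mydrv_preinit(struct drm_device *dev, unsigned long flags)
{
	/* prepare driver state before the device node is created */
	return 0;
}

static void mydrv_pretakedown(struct drm_device *dev)
{
	/* release driver state before the core tears the device down */
}

void DRM(driver_register_fns)(struct drm_device *dev)
{
	dev->fn_tbl.preinit     = mydrv_preinit;
	dev->fn_tbl.pretakedown = mydrv_pretakedown;
}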
@@ -570,5 +588,37 @@ extern void *DRM(pci_alloc)(drm_device_t *dev, size_t size,
extern void	DRM(pci_free)(drm_device_t *dev, size_t size,
			      void *vaddr, dma_addr_t busaddr);

/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
	map->handle = DRM(ioremap)( dev, map );
}
#if 0
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, struct drm_device *dev)
{
	map->handle = DRM(ioremap_nocache)(dev, map);
}
#endif
static __inline__ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
	if ( map->handle && map->size )
		DRM(ioremapfree)( map );
}

static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
{
	drm_map_list_entry_t *listentry;
	TAILQ_FOREACH(listentry, dev->maplist, link) {
		if ( listentry->map->offset == offset ) {
			return listentry->map;
		}
	}
	return NULL;
}

static __inline__ void drm_core_dropmap(struct drm_map *map)
{
}

#endif /* __KERNEL__ */
#endif /* _DRM_P_H_ */
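To show what these inlines replace at a call site, here is a hedged sketch of driver code converted from the removed DRM_FIND_MAP/DRM_IOREMAP macros; mydrv_map_regs and its offset argument are illustrative assumptions, not taken from this diff.

static int mydrv_map_regs(struct drm_device *dev, unsigned long offset)
{
	drm_local_map_t *map;

	/* was: DRM_FIND_MAP(map, offset); DRM_IOREMAP(map, dev); */
	map = drm_core_findmap(dev, offset);
	if (map == NULL)
		return DRM_ERR(EINVAL);
	drm_core_ioremap(map, dev);
	if (map->handle == NULL)
		return DRM_ERR(ENOMEM);
	return 0;
}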
@ -40,17 +40,6 @@
|
|||
#define __HAVE_SG 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_BUF_PRIV_T
|
||||
#define DRIVER_BUF_PRIV_T u32
|
||||
#endif
|
||||
#ifndef DRIVER_AGP_BUFFERS_MAP
|
||||
#if __HAVE_AGP && __HAVE_DMA
|
||||
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
|
||||
#else
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Compute order. Can be made faster.
|
||||
*/
|
||||
|
@ -111,7 +100,7 @@ int DRM(addmap)( DRM_IOCTL_ARGS )
|
|||
|
||||
switch ( map->type ) {
|
||||
case _DRM_REGISTERS:
|
||||
DRM_IOREMAP(map, dev);
|
||||
drm_core_ioremap(map, dev);
|
||||
if (!(map->flags & _DRM_WRITE_COMBINING))
|
||||
break;
|
||||
/* FALLTHROUGH */
|
||||
|
@ -353,7 +342,7 @@ static int DRM(addbufs_agp)(drm_device_t *dev, drm_buf_desc_t *request)
|
|||
buf->pending = 0;
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(calloc)(1, buf->dev_priv_size,
|
||||
DRM_MEM_BUFS);
|
||||
if (buf->dev_private == NULL) {
|
||||
|
@ -512,8 +501,8 @@ static int DRM(addbufs_pci)(drm_device_t *dev, drm_buf_desc_t *request)
|
|||
buf->pending = 0;
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)(sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)(buf->dev_priv_size,
|
||||
DRM_MEM_BUFS);
|
||||
if (buf->dev_private == NULL) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
|
@ -636,7 +625,7 @@ static int DRM(addbufs_sg)(drm_device_t *dev, drm_buf_desc_t *request)
|
|||
buf->pending = 0;
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(calloc)(1, buf->dev_priv_size,
|
||||
DRM_MEM_BUFS);
|
||||
if (buf->dev_private == NULL) {
|
||||
|
@ -907,7 +896,7 @@ int DRM(mapbufs)( DRM_IOCTL_ARGS )
|
|||
|
||||
if ((__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
|
||||
(__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG))) {
|
||||
drm_local_map_t *map = DRIVER_AGP_BUFFERS_MAP(dev);
|
||||
drm_local_map_t *map = dev->agp_buffer_map;
|
||||
|
||||
if (map == NULL) {
|
||||
retcode = EINVAL;
|
||||
|
|
|
@ -278,10 +278,8 @@ int DRM(addctx)( DRM_IOCTL_ARGS )
|
|||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
|
||||
#ifdef DRIVER_CTX_CTOR
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT )
|
||||
DRIVER_CTX_CTOR(dev, ctx.handle);
|
||||
#endif
|
||||
if ( dev->fn_tbl.context_ctor && ctx.handle != DRM_KERNEL_CONTEXT )
|
||||
dev->fn_tbl.context_ctor(dev, ctx.handle);
|
||||
|
||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
|
||||
|
||||
|
@ -341,9 +339,9 @@ int DRM(rmctx)( DRM_IOCTL_ARGS )
|
|||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
|
||||
#ifdef DRIVER_CTX_DTOR
|
||||
DRIVER_CTX_DTOR(dev, ctx.handle);
|
||||
#endif
|
||||
if (dev->fn_tbl.context_dtor)
|
||||
dev->fn_tbl.context_dtor(dev, ctx.handle);
|
||||
|
||||
DRM(ctxbitmap_free)( dev, ctx.handle );
|
||||
}
|
||||
|
||||
|
|
|
@@ -77,33 +77,9 @@
#define __HAVE_SG		0
#endif

#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT(dev) do {} while (0)
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT(dev) do {} while (0)
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN(dev)
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_OPEN_HELPER
#define DRIVER_OPEN_HELPER( priv, dev )
#endif
#ifndef DRIVER_FOPS
#endif
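The empty DRIVER_* fallbacks above are no longer needed because a NULL fn_tbl entry plays the same role; the core guards every call, as the hunks below show. A hypothetical restatement of the idiom (the helper name is invented purely for illustration; the diff itself inlines the check at each call site):

/* A NULL hook behaves like the old empty default macro. */
static int drm_call_presetup(drm_device_t *dev)
{
	if (dev->fn_tbl.presetup)
		return dev->fn_tbl.presetup(dev);
	return 0;
}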
@ -437,7 +413,9 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
|
||||
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
|
||||
|
||||
DRIVER_PRESETUP();
|
||||
if (dev->fn_tbl.presetup)
|
||||
dev->fn_tbl.presetup(dev);
|
||||
|
||||
dev->buf_use = 0;
|
||||
|
||||
#if __HAVE_DMA
|
||||
|
@ -507,7 +485,9 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
DRIVER_POSTSETUP();
|
||||
if (dev->fn_tbl.postsetup)
|
||||
dev->fn_tbl.postsetup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -523,7 +503,9 @@ static int DRM(takedown)( drm_device_t *dev )
|
|||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
DRIVER_PRETAKEDOWN(dev);
|
||||
if (dev->fn_tbl.pretakedown)
|
||||
dev->fn_tbl.pretakedown(dev);
|
||||
|
||||
#if __HAVE_IRQ
|
||||
if (dev->irq_enabled)
|
||||
DRM(irq_uninstall)( dev );
|
||||
|
@ -638,8 +620,7 @@ static int DRM(init)( device_t nbdev )
|
|||
int retcode;
|
||||
#endif
|
||||
DRM_DEBUG( "\n" );
|
||||
DRIVER_PREINIT(dev);
|
||||
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
unit = device_get_unit(nbdev);
|
||||
dev = device_get_softc(nbdev);
|
||||
|
@ -650,6 +631,13 @@ static int DRM(init)( device_t nbdev )
|
|||
else
|
||||
dev->device = nbdev;
|
||||
|
||||
/* dev_priv_size can be changed by a driver in driver_register_fns */
|
||||
dev->dev_priv_size = sizeof(u32);
|
||||
DRM(driver_register_fns)(dev);
|
||||
|
||||
if (dev->fn_tbl.preinit)
|
||||
dev->fn_tbl.preinit(dev, 0);
|
||||
|
||||
dev->devnode = make_dev( &DRM(cdevsw),
|
||||
unit,
|
||||
DRM_DEV_UID,
|
||||
|
@ -661,6 +649,14 @@ static int DRM(init)( device_t nbdev )
|
|||
#endif
|
||||
#elif defined(__NetBSD__)
|
||||
unit = minor(dev->device.dv_unit);
|
||||
|
||||
/* dev_priv_size can be changed by a driver in driver_register_fns */
|
||||
dev->dev_priv_size = sizeof(u32);
|
||||
DRM(driver_register_fns)(dev);
|
||||
|
||||
if (dev->fn_tbl.preinit)
|
||||
dev->fn_tbl.preinit(dev, 0);
|
||||
|
||||
#endif
|
||||
|
||||
dev->irq = pci_get_irq(dev->device);
|
||||
|
@ -716,7 +712,8 @@ static int DRM(init)( device_t nbdev )
|
|||
DRIVER_DATE,
|
||||
unit );
|
||||
|
||||
DRIVER_POSTINIT(dev);
|
||||
if (dev->fn_tbl.postinit)
|
||||
dev->fn_tbl.postinit(dev, 0);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -773,7 +770,9 @@ static void DRM(cleanup)(drm_device_t *dev)
|
|||
dev->agp = NULL;
|
||||
}
|
||||
#endif
|
||||
DRIVER_POSTCLEANUP();
|
||||
if (dev->fn_tbl.postcleanup)
|
||||
dev->fn_tbl.postcleanup(dev);
|
||||
|
||||
DRM(mem_uninit)();
|
||||
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
|
||||
mtx_destroy(&dev->dev_lock);
|
||||
|
@ -854,7 +853,8 @@ int DRM(close)(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
|
|||
return EINVAL;
|
||||
}
|
||||
|
||||
DRIVER_PRERELEASE();
|
||||
if (dev->fn_tbl.prerelease)
|
||||
dev->fn_tbl.prerelease(dev, filp);
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
|
|
|
@ -90,7 +90,8 @@ int DRM(open_helper)(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
|
|||
priv->ioctl_count = 0;
|
||||
priv->authenticated = !DRM_SUSER(p);
|
||||
|
||||
DRIVER_OPEN_HELPER( priv, dev );
|
||||
if (dev->fn_tbl.open_helper)
|
||||
dev->fn_tbl.open_helper(dev, priv);
|
||||
|
||||
TAILQ_INSERT_TAIL(&dev->files, priv, link);
|
||||
}
|
||||
|
|
|
@ -447,18 +447,6 @@ find_first_zero_bit(volatile void *p, int max)
|
|||
#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
|
||||
#endif
|
||||
|
||||
#define DRM_FIND_MAP(dest, o) \
|
||||
do { \
|
||||
drm_map_list_entry_t *listentry; \
|
||||
TAILQ_FOREACH(listentry, dev->maplist, link) { \
|
||||
if ( listentry->map->offset == o ) { \
|
||||
dest = listentry->map; \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* Internal functions */
|
||||
|
||||
/* drm_drv.h */
|
||||
|
|
|
@ -357,17 +357,6 @@ do { \
|
|||
#define DRM_DEBUG(fmt, arg...) do { } while (0)
|
||||
#endif
|
||||
|
||||
#define DRM_FIND_MAP(dest, o) \
|
||||
do { \
|
||||
drm_map_list_entry_t *listentry; \
|
||||
TAILQ_FOREACH(listentry, dev->maplist, link) { \
|
||||
if ( listentry->map->offset == o ) { \
|
||||
dest = listentry->map; \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/* Internal functions */
|
||||
|
||||
/* drm_drv.h */
|
||||
|
|
|
@@ -50,3 +50,4 @@ DRIVER_MODULE(sisdrm, pci, sisdrv_driver, sisdrv_devclass, 0, 0);
#elif defined(__NetBSD__)
CFDRIVER_DECL(sis, DV_TTY, NULL);
#endif /* __FreeBSD__ */

@@ -52,3 +52,7 @@ DRIVER_MODULE(tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
#elif defined(__NetBSD__)
CFDRIVER_DECL(tdfx, DV_TTY, NULL);
#endif /* __FreeBSD__ */

void DRM(driver_register_fns)(drm_device_t *dev)
{
}
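sis and tdfx need no hooks, so their registration functions stay empty and every core default applies. A hypothetical variant (not in this diff) of a driver that only needs a larger per-buffer private record would still use this entry point; the struct below is invented for illustration.

typedef struct mydrv_buf_priv {
	u32 age;
	u32 dispatched;
} mydrv_buf_priv_t;

void DRM(driver_register_fns)(drm_device_t *dev)
{
	/* fn_tbl stays all NULL; only the addbufs allocation size changes */
	dev->dev_priv_size = sizeof(mydrv_buf_priv_t);
}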
@@ -105,12 +105,6 @@
|
|||
#ifndef __HAVE_IRQ
|
||||
#define __HAVE_IRQ 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_WAITLIST
|
||||
#define __HAVE_DMA_WAITLIST 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_FREELIST
|
||||
#define __HAVE_DMA_FREELIST 0
|
||||
#endif
|
||||
|
||||
#define __REALLY_HAVE_AGP (__HAVE_AGP && (defined(CONFIG_AGP) || \
|
||||
defined(CONFIG_AGP_MODULE)))
|
||||
|
@ -224,54 +218,6 @@
|
|||
/*@}*/
|
||||
|
||||
|
||||
/***********************************************************************/
|
||||
/** \name Mapping helper macros */
|
||||
/*@{*/
|
||||
|
||||
#define DRM_IOREMAP(map, dev) \
|
||||
(map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) )
|
||||
|
||||
#define DRM_IOREMAP_NOCACHE(map, dev) \
|
||||
(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev))
|
||||
|
||||
#define DRM_IOREMAPFREE(map, dev) \
|
||||
do { \
|
||||
if ( (map)->handle && (map)->size ) \
|
||||
DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* Find mapping.
|
||||
*
|
||||
* \param _map matching mapping if found, untouched otherwise.
|
||||
* \param _o offset.
|
||||
*
|
||||
* Expects the existence of a local variable named \p dev pointing to the
|
||||
* drm_device structure.
|
||||
*/
|
||||
#define DRM_FIND_MAP(_map, _o) \
|
||||
do { \
|
||||
struct list_head *_list; \
|
||||
list_for_each( _list, &dev->maplist->head ) { \
|
||||
drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head ); \
|
||||
if ( _entry->map && \
|
||||
_entry->map->offset == (_o) ) { \
|
||||
(_map) = _entry->map; \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/**
|
||||
* Drop mapping.
|
||||
*
|
||||
* \sa #DRM_FIND_MAP.
|
||||
*/
|
||||
#define DRM_DROP_MAP(_map)
|
||||
|
||||
/*@}*/
|
||||
|
||||
|
||||
/***********************************************************************/
|
||||
/** \name Internal types and structures */
|
||||
/*@{*/
|
||||
|
@@ -569,6 +515,35 @@ typedef struct drm_vbl_sig {

#endif

/**
 * DRM device functions structure
 */
struct drm_device;

struct drm_driver_fn {
	int (*preinit)(struct drm_device *, unsigned long flags);
	int (*postinit)(struct drm_device *, unsigned long flags);
	void (*prerelease)(struct drm_device *, struct file *filp);
	void (*pretakedown)(struct drm_device *);
	int (*postcleanup)(struct drm_device *);
	int (*presetup)(struct drm_device *);
	int (*postsetup)(struct drm_device *);
	void (*open_helper)(struct drm_device *, drm_file_t *);
	void (*release)(struct drm_device *, struct file *filp);
	void (*dma_ready)(struct drm_device *);
	int (*dma_quiescent)(struct drm_device *);
	int (*dma_flush_block_and_flush)(struct drm_device *, int context, drm_lock_flags_t flags);
	int (*dma_flush_unblock)(struct drm_device *, int context, drm_lock_flags_t flags);
	int (*context_ctor)(struct drm_device *dev, int context);
	int (*context_dtor)(struct drm_device *dev, int context);
	int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
	int (*kernel_context_switch_unlock)(struct drm_device *dev);
	int (*dma_schedule)(struct drm_device *dev, int locked);
	int (*waitlist_destroy)(drm_waitlist_t *bl);
	int (*freelist_create)(drm_freelist_t *bl, int count);
	int (*freelist_put)(struct drm_device *dev, drm_freelist_t *bl, drm_buf_t *buf);
	int (*freelist_destroy)(drm_freelist_t *bl);
};

/**
 * DRM device structure.
@@ -704,8 +679,12 @@ typedef struct drm_device {
	sigset_t	sigmask;

	int		need_reset;	/**< secondary device needing reset */
	struct drm_driver_fn fn_tbl;
	drm_local_map_t	*agp_buffer_map;
	int		dev_priv_size;
} drm_device_t;

extern void DRM(driver_register_fns)(struct drm_device *dev);

/******************************************************************/
/** \name Internal function definitions */
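On the Linux side the table also carries the DMA waitlist/freelist hooks. A sketch of a driver that still relies on that machinery might register them as below; everything prefixed mydrv_ is hypothetical and not part of this diff.

extern int mydrv_freelist_create(drm_freelist_t *bl, int count);
extern int mydrv_freelist_put(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf);
extern int mydrv_freelist_destroy(drm_freelist_t *bl);
extern int mydrv_dma_schedule(drm_device_t *dev, int locked);

void DRM(driver_register_fns)(drm_device_t *dev)
{
	dev->dev_priv_size = sizeof(u32);
	dev->fn_tbl.freelist_create  = mydrv_freelist_create;
	dev->fn_tbl.freelist_put     = mydrv_freelist_put;
	dev->fn_tbl.freelist_destroy = mydrv_freelist_destroy;
	dev->fn_tbl.dma_schedule     = mydrv_dma_schedule;
}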
@@ -960,6 +939,40 @@ extern void *DRM(pci_alloc)(drm_device_t *dev, size_t size,
extern void	DRM(pci_free)(drm_device_t *dev, size_t size,
			      void *vaddr, dma_addr_t busaddr);

/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
{
	map->handle = DRM(ioremap)( map->offset, map->size, dev );
}

static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, struct drm_device *dev)
{
	map->handle = DRM(ioremap_nocache)(map->offset, map->size, dev);
}

static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
	if ( map->handle && map->size )
		DRM(ioremapfree)( map->handle, map->size, dev );
}

static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
{
	struct list_head *_list;
	list_for_each( _list, &dev->maplist->head ) {
		drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
		if ( _entry->map &&
		     _entry->map->offset == offset ) {
			return _entry->map;
		}
	}
	return NULL;
}

static __inline__ void drm_core_dropmap(struct drm_map *map)
{
}
/*@}*/

#endif /* __KERNEL__ */
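A matching teardown sketch (hypothetical, not from this diff): where code used to expand DRM_IOREMAPFREE(map, dev), it now calls the inline, which keeps the same handle/size guard.

static void mydrv_unmap_regs(drm_device_t *dev, drm_map_t *map)
{
	drm_core_ioremapfree(map, dev);	/* no-op when handle or size is 0 */
	map->handle = NULL;
}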
@ -45,18 +45,6 @@
|
|||
#define __HAVE_SG 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_BUF_PRIV_T
|
||||
#define DRIVER_BUF_PRIV_T u32
|
||||
#endif
|
||||
#ifndef DRIVER_AGP_BUFFERS_MAP
|
||||
#if __HAVE_AGP && __HAVE_DMA
|
||||
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
|
||||
#else
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* Compute size order. Returns the exponent of the smaller power of two which
|
||||
* is greater or equal to given number.
|
||||
|
@ -316,7 +304,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
|
|||
*
|
||||
* Frees any pages and buffers associated with the given entry.
|
||||
*/
|
||||
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
|
||||
static void DRM(cleanup_buf_error)(drm_device_t *dev, drm_buf_entry_t *entry)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -349,9 +337,8 @@ static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
|
|||
sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS);
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_destroy)(&entry->freelist);
|
||||
#endif
|
||||
if (dev->fn_tbl.freelist_destroy)
|
||||
dev->fn_tbl.freelist_destroy(&entry->freelist);
|
||||
|
||||
entry->buf_count = 0;
|
||||
}
|
||||
|
@ -474,13 +461,13 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
|||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)( buf->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
if(!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -504,7 +491,7 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_BUFS );
|
||||
if(!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -521,12 +508,14 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
|
||||
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
if (dev->fn_tbl.freelist_create)
|
||||
{
|
||||
dev->fn_tbl.freelist_create( &entry->freelist, entry->buf_count);
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dev->fn_tbl.freelist_put( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
|
@ -665,7 +654,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
entry->seg_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
DRM(free)( temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
|
@ -699,14 +688,14 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)( dev->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
if(!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
entry->seg_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
DRM(free)( temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
|
@ -730,7 +719,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_BUFS );
|
||||
if (!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
DRM(free)( temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
|
@ -760,12 +749,14 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
dma->page_count += entry->seg_count << page_order;
|
||||
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
if (dev->fn_tbl.freelist_create)
|
||||
{
|
||||
dev->fn_tbl.freelist_create( &entry->freelist, entry->buf_count);
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dev->fn_tbl.freelist_put( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
|
@ -883,13 +874,13 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
|
|||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)( dev->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
if(!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -914,7 +905,7 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_BUFS );
|
||||
if(!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -931,12 +922,14 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
|
||||
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
if (dev->fn_tbl.freelist_create)
|
||||
{
|
||||
dev->fn_tbl.freelist_create( &entry->freelist, entry->buf_count);
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dev->fn_tbl.freelist_put( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
|
@ -1222,7 +1215,7 @@ int DRM(mapbufs)( struct inode *inode, struct file *filp,
|
|||
if ( request.count >= dma->buf_count ) {
|
||||
if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
|
||||
(__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
|
||||
drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
|
||||
drm_map_t *map = dev->agp_buffer_map;
|
||||
|
||||
if ( !map ) {
|
||||
retcode = -EINVAL;
|
||||
|
|
|
@ -420,10 +420,13 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
|
|||
/* Should this return -EBUSY instead? */
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef DRIVER_CTX_CTOR
|
||||
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT )
|
||||
DRIVER_CTX_CTOR(dev, ctx.handle);
|
||||
#endif
|
||||
{
|
||||
if (dev->fn_tbl.context_ctor)
|
||||
dev->fn_tbl.context_ctor(dev, ctx.handle);
|
||||
}
|
||||
|
||||
ctx_entry = DRM(alloc)( sizeof(*ctx_entry), DRM_MEM_CTXLIST );
|
||||
if ( !ctx_entry ) {
|
||||
DRM_DEBUG("out of memory\n");
|
||||
|
@ -555,9 +558,8 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
|
|||
priv->remove_auth_on_close = 1;
|
||||
}
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
|
||||
#ifdef DRIVER_CTX_DTOR
|
||||
DRIVER_CTX_DTOR(dev, ctx.handle);
|
||||
#endif
|
||||
		if (dev->fn_tbl.context_dtor)
			dev->fn_tbl.context_dtor(dev, ctx.handle);
|
||||
DRM(ctxbitmap_free)( dev, ctx.handle );
|
||||
}
|
||||
|
||||
|
|
|
@ -117,9 +117,9 @@ void DRM(dma_takedown)(drm_device_t *dev)
|
|||
dma->bufs[i].buf_count *
|
||||
sizeof(*dma->bufs[0].buflist),
|
||||
DRM_MEM_BUFS);
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_destroy)(&dma->bufs[i].freelist);
|
||||
#endif
|
||||
|
||||
if (dev->fn_tbl.freelist_destroy)
|
||||
dev->fn_tbl.freelist_destroy(&dma->bufs[i].freelist);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -159,16 +159,13 @@ void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
|
|||
if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
|
||||
wake_up_interruptible(&buf->dma_wait);
|
||||
}
|
||||
#if __HAVE_DMA_FREELIST
|
||||
else {
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
/* If processes are waiting, the last one
|
||||
to wake will put the buffer on the free
|
||||
list. If no processes are waiting, we
|
||||
put the buffer on the freelist here. */
|
||||
DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
|
||||
}
|
||||
#endif
|
||||
/* If processes are waiting, the last one
|
||||
to wake will put the buffer on the free
|
||||
list. If no processes are waiting, we
|
||||
put the buffer on the freelist here. */
|
||||
else if (dev->fn_tbl.freelist_put)
|
||||
dev->fn_tbl.freelist_put(dev, &dev->dma->bufs[buf->order].freelist, buf);
|
||||
|
||||
}
|
||||
|
||||
#if !__HAVE_DMA_RECLAIM
|
||||
|
|
|
@ -67,82 +67,16 @@
|
|||
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
|
||||
#define __HAVE_MULTIPLE_DMA_QUEUES 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_SCHEDULE
|
||||
#define __HAVE_DMA_SCHEDULE 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_FLUSH
|
||||
#define __HAVE_DMA_FLUSH 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_READY
|
||||
#define __HAVE_DMA_READY 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_QUIESCENT
|
||||
#define __HAVE_DMA_QUIESCENT 0
|
||||
#endif
|
||||
#ifndef __HAVE_RELEASE
|
||||
#define __HAVE_RELEASE 0
|
||||
#endif
|
||||
#ifndef __HAVE_COUNTERS
|
||||
#define __HAVE_COUNTERS 0
|
||||
#endif
|
||||
#ifndef __HAVE_SG
|
||||
#define __HAVE_SG 0
|
||||
#endif
|
||||
/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm modules in
|
||||
* the DRI cvs tree, but it is required by the kernel tree's sparc
|
||||
* driver.
|
||||
*/
|
||||
#ifndef __HAVE_KERNEL_CTX_SWITCH
|
||||
#define __HAVE_KERNEL_CTX_SWITCH 0
|
||||
#endif
|
||||
#ifndef __HAVE_DRIVER_FOPS_READ
|
||||
#define __HAVE_DRIVER_FOPS_READ 0
|
||||
#endif
|
||||
#ifndef __HAVE_DRIVER_FOPS_POLL
|
||||
#define __HAVE_DRIVER_FOPS_POLL 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_PREINIT
|
||||
#define DRIVER_PREINIT(dev, flags) 0
|
||||
#endif
|
||||
#ifndef DRIVER_POSTINIT
|
||||
#define DRIVER_POSTINIT(dev, flags) 0
|
||||
#endif
|
||||
#ifndef DRIVER_PRERELEASE
|
||||
#define DRIVER_PRERELEASE()
|
||||
#endif
|
||||
#ifndef DRIVER_PRETAKEDOWN
|
||||
#define DRIVER_PRETAKEDOWN(dev)
|
||||
#endif
|
||||
#ifndef DRIVER_POSTCLEANUP
|
||||
#define DRIVER_POSTCLEANUP(dev)
|
||||
#endif
|
||||
#ifndef DRIVER_PRESETUP
|
||||
#define DRIVER_PRESETUP()
|
||||
#endif
|
||||
#ifndef DRIVER_POSTSETUP
|
||||
#define DRIVER_POSTSETUP()
|
||||
#endif
|
||||
#ifndef DRIVER_IOCTLS
|
||||
#define DRIVER_IOCTLS
|
||||
#endif
|
||||
#ifndef DRIVER_OPEN_HELPER
|
||||
#define DRIVER_OPEN_HELPER( priv, dev )
|
||||
#endif
|
||||
#ifndef DRIVER_FOPS
|
||||
#define DRIVER_FOPS \
|
||||
struct file_operations DRM(fops) = { \
|
||||
.owner = THIS_MODULE, \
|
||||
.open = DRM(open), \
|
||||
.flush = DRM(flush), \
|
||||
.release = DRM(release), \
|
||||
.ioctl = DRM(ioctl), \
|
||||
.mmap = DRM(mmap), \
|
||||
.fasync = DRM(fasync), \
|
||||
.poll = DRM(poll), \
|
||||
.read = DRM(read), \
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __exit drm_cleanup( drm_device_t *dev );
|
||||
|
||||
|
@ -180,10 +114,20 @@ drm_device_t DRM(device)[MAX_DEVICES];
|
|||
int DRM(numdevs) = 0;
|
||||
int DRM(fb_loaded) = 0;
|
||||
|
||||
DRIVER_FOPS;
|
||||
struct file_operations DRM(fops) = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = DRM(open),
|
||||
.flush = DRM(flush),
|
||||
.release = DRM(release),
|
||||
.ioctl = DRM(ioctl),
|
||||
.mmap = DRM(mmap),
|
||||
.fasync = DRM(fasync),
|
||||
.poll = DRM(poll),
|
||||
.read = DRM(read),
|
||||
};
|
||||
|
||||
/** Ioctl table */
|
||||
static drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||
drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
|
||||
|
@ -222,12 +166,7 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
|
|||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
/* Gamma only, really */
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
|
||||
#else
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
||||
|
@ -279,7 +218,9 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
{
|
||||
int i;
|
||||
|
||||
DRIVER_PRESETUP();
|
||||
if (dev->fn_tbl.presetup)
|
||||
dev->fn_tbl.presetup(dev);
|
||||
|
||||
atomic_set( &dev->ioctl_count, 0 );
|
||||
atomic_set( &dev->vma_count, 0 );
|
||||
dev->buf_use = 0;
|
||||
|
@ -325,9 +266,6 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
#ifdef __HAVE_COUNTER14
|
||||
dev->types[14] = __HAVE_COUNTER14;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER15
|
||||
dev->types[14] = __HAVE_COUNTER14;
|
||||
#endif
|
||||
|
||||
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
|
||||
atomic_set( &dev->counts[i], 0 );
|
||||
|
@ -385,7 +323,9 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
DRIVER_POSTSETUP();
|
||||
if (dev->fn_tbl.postsetup)
|
||||
dev->fn_tbl.postsetup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -410,7 +350,8 @@ static int DRM(takedown)( drm_device_t *dev )
|
|||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
DRIVER_PRETAKEDOWN(dev);
|
||||
if (dev->fn_tbl.pretakedown)
|
||||
dev->fn_tbl.pretakedown(dev);
|
||||
#if __HAVE_IRQ
|
||||
if ( dev->irq_enabled ) DRM(irq_uninstall)( dev );
|
||||
#endif
|
||||
|
@ -523,9 +464,9 @@ static int DRM(takedown)( drm_device_t *dev )
|
|||
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
|
||||
if ( dev->queuelist ) {
|
||||
for ( i = 0 ; i < dev->queue_count ; i++ ) {
|
||||
#if __HAVE_DMA_WAITLIST
|
||||
DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
|
||||
#endif
|
||||
if (dev->fn_tbl.waitlist_destroy)
|
||||
dev->fn_tbl.waitlist_destroy( &dev->queuelist[i]->waitlist);
|
||||
|
||||
if ( dev->queuelist[i] ) {
|
||||
DRM(free)( dev->queuelist[i],
|
||||
sizeof(*dev->queuelist[0]),
|
||||
|
@@ -596,8 +537,13 @@ static int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	dev->pci_func = PCI_FUNC(pdev->devfn);
	dev->irq = pdev->irq;

	if ((retcode = DRIVER_PREINIT(dev, ent->driver_data)))
		goto error_out_unreg;
	/* dev_priv_size can be changed by a driver in driver_register_fns */
	dev->dev_priv_size = sizeof(u32);
	DRM(driver_register_fns)(dev);

	if (dev->fn_tbl.preinit)
		if ((retcode = dev->fn_tbl.preinit(dev, ent->driver_data)))
			goto error_out_unreg;

#if __REALLY_HAVE_AGP
	dev->agp = DRM(agp_init)();
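Because drm_probe now installs the table before calling preinit, a failing preinit unwinds the probe. A hedged sketch of such a hook follows; the mydrv names, the private struct, and the use of DRM_MEM_DRIVER are assumptions, not taken from this diff.

typedef struct drm_mydrv_private {
	unsigned long chipset;
} drm_mydrv_private_t;

static int mydrv_preinit(struct drm_device *dev, unsigned long flags)
{
	drm_mydrv_private_t *dev_priv;

	dev_priv = DRM(alloc)(sizeof(*dev_priv), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;
	memset(dev_priv, 0, sizeof(*dev_priv));
	dev_priv->chipset = flags;
	dev->dev_private = dev_priv;
	return 0;
}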
@ -643,9 +589,11 @@ static int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev->minor,
|
||||
pci_pretty_name(pdev)
|
||||
);
|
||||
|
||||
/* drivers add secondary heads here if needed */
|
||||
if ((retcode = DRIVER_POSTINIT(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
if (dev->fn_tbl.postinit)
|
||||
if ((retcode = dev->fn_tbl.postinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -765,7 +713,8 @@ static void __exit drm_cleanup( drm_device_t *dev )
|
|||
dev->agp = NULL;
|
||||
}
|
||||
#endif
|
||||
DRIVER_POSTCLEANUP(dev);
|
||||
if (dev->fn_tbl.postcleanup)
|
||||
dev->fn_tbl.postcleanup(dev);
|
||||
}
|
||||
|
||||
static void __exit drm_exit (void)
|
||||
|
@ -901,7 +850,8 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
DRIVER_PRERELEASE();
|
||||
if (dev->fn_tbl.prerelease)
|
||||
dev->fn_tbl.prerelease(dev, filp);
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
|
@ -916,9 +866,10 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
DRM_DEBUG( "File %p released, freeing lock for context %d\n",
|
||||
filp,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
#if __HAVE_RELEASE
|
||||
DRIVER_RELEASE();
|
||||
#endif
|
||||
|
||||
if (dev->fn_tbl.release)
|
||||
dev->fn_tbl.release(dev, filp);
|
||||
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
|
@ -927,8 +878,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
#if __HAVE_RELEASE
|
||||
else if ( priv->lock_count && dev->lock.hw_lock ) {
|
||||
else if ( dev->fn_tbl.release && priv->lock_count && dev->lock.hw_lock ) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
|
||||
|
@ -957,12 +907,14 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
if( !retcode ) {
|
||||
DRIVER_RELEASE();
|
||||
if (dev->fn_tbl.release)
|
||||
dev->fn_tbl.release(dev, filp);
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
}
|
||||
}
|
||||
#elif __HAVE_DMA
|
||||
|
||||
#if __HAVE_DMA
|
||||
DRM(reclaim_buffers)( filp );
|
||||
#endif
|
||||
|
||||
|
@ -975,9 +927,8 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
|
||||
if ( pos->tag == priv &&
|
||||
pos->handle != DRM_KERNEL_CONTEXT ) {
|
||||
#ifdef DRIVER_CTX_DTOR
|
||||
DRIVER_CTX_DTOR( dev, pos->handle);
|
||||
#endif
|
||||
if (dev->fn_tbl.context_dtor)
|
||||
dev->fn_tbl.context_dtor(dev, pos->handle);
|
||||
#if __HAVE_CTX_BITMAP
|
||||
DRM(ctxbitmap_free)( dev, pos->handle );
|
||||
#endif
|
||||
|
@ -1134,9 +1085,8 @@ int DRM(lock)( struct inode *inode, struct file *filp,
|
|||
q = dev->queuelist[lock.context];
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
|
||||
#endif
|
||||
if (dev->fn_tbl.dma_flush_block_and_flush)
|
||||
ret = dev->fn_tbl.dma_flush_block_and_flush(dev, lock.context, lock.flags);
|
||||
if ( !ret ) {
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
|
@ -1165,9 +1115,8 @@ int DRM(lock)( struct inode *inode, struct file *filp,
|
|||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
|
||||
#endif
|
||||
if (dev->fn_tbl.dma_flush_unblock)
|
||||
dev->fn_tbl.dma_flush_unblock(dev, lock.context, lock.flags);
|
||||
|
||||
if ( !ret ) {
|
||||
sigemptyset( &dev->sigmask );
|
||||
|
@ -1180,26 +1129,17 @@ int DRM(lock)( struct inode *inode, struct file *filp,
|
|||
block_all_signals( DRM(notifier),
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
|
||||
#if __HAVE_DMA_READY
|
||||
if ( lock.flags & _DRM_LOCK_READY ) {
|
||||
DRIVER_DMA_READY();
|
||||
if (dev->fn_tbl.dma_ready && (lock.flags & _DRM_LOCK_READY))
|
||||
dev->fn_tbl.dma_ready(dev);
|
||||
|
||||
if ( dev->fn_tbl.dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
|
||||
return dev->fn_tbl.dma_quiescent(dev);
|
||||
|
||||
|
||||
if ( dev->fn_tbl.kernel_context_switch && dev->last_context != lock.context ) {
|
||||
dev->fn_tbl.kernel_context_switch(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
#endif
|
||||
#if __HAVE_DMA_QUIESCENT
|
||||
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
||||
DRIVER_DMA_QUIESCENT();
|
||||
}
|
||||
#endif
|
||||
/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the
|
||||
* drm modules in the DRI cvs tree, but it is required
|
||||
* by the Sparc driver.
|
||||
*/
|
||||
#if __HAVE_KERNEL_CTX_SWITCH
|
||||
if ( dev->last_context != lock.context ) {
|
||||
DRM(context_switch)(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
|
||||
|
@ -1236,40 +1176,21 @@ int DRM(unlock)( struct inode *inode, struct file *filp,
|
|||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
|
||||
|
||||
/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm
|
||||
* modules in the DRI cvs tree, but it is required by the
|
||||
* Sparc driver.
|
||||
*/
|
||||
#if __HAVE_KERNEL_CTX_SWITCH
|
||||
/* We no longer really hold it, but if we are the next
|
||||
* agent to request it then we should just be able to
|
||||
* take it immediately and not eat the ioctl.
|
||||
*/
|
||||
dev->lock.filp = 0;
|
||||
if (dev->fn_tbl.kernel_context_switch_unlock)
|
||||
dev->fn_tbl.kernel_context_switch_unlock(dev);
|
||||
else
|
||||
{
|
||||
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
|
||||
unsigned int old, new, prev, ctx;
|
||||
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
|
||||
ctx = lock.context;
|
||||
do {
|
||||
old = *plock;
|
||||
new = ctx;
|
||||
prev = cmpxchg(plock, old, new);
|
||||
} while (prev != old);
|
||||
}
|
||||
wake_up_interruptible(&dev->lock.lock_queue);
|
||||
#else
|
||||
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
#if __HAVE_DMA_SCHEDULE
|
||||
DRM(dma_schedule)( dev, 1 );
|
||||
#endif
|
||||
if (dev->fn_tbl.dma_schedule)
|
||||
dev->fn_tbl.dma_schedule(dev, 1);
|
||||
|
||||
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
}
|
||||
}
|
||||
#endif /* !__HAVE_KERNEL_CTX_SWITCH */
|
||||
|
||||
unblock_all_signals();
|
||||
return 0;
|
||||
|
|
|
@ -73,7 +73,8 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev)
|
|||
priv->authenticated = capable(CAP_SYS_ADMIN);
|
||||
priv->lock_count = 0;
|
||||
|
||||
DRIVER_OPEN_HELPER( priv, dev );
|
||||
if (dev->fn_tbl.open_helper)
|
||||
dev->fn_tbl.open_helper(dev, priv);
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (!dev->file_last) {
|
||||
|
@ -131,19 +132,15 @@ int DRM(fasync)(int fd, struct file *filp, int on)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if !__HAVE_DRIVER_FOPS_POLL
|
||||
/** No-op. */
|
||||
unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if !__HAVE_DRIVER_FOPS_READ
|
||||
/** No-op. */
|
||||
ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -199,6 +199,7 @@ static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *d
|
|||
iounmap(pt);
|
||||
}
|
||||
|
||||
|
||||
#if DEBUG_MEMORY
|
||||
#include "drm_memory_debug.h"
|
||||
#else
|
||||
|
|
|
@ -8,9 +8,5 @@
|
|||
*/
|
||||
#define DRM(x) ffb_##x
|
||||
|
||||
/* General customization:
|
||||
*/
|
||||
#define __HAVE_KERNEL_CTX_SWITCH 1
|
||||
#define __HAVE_RELEASE 1
|
||||
#endif
|
||||
|
||||
|
|
|
@ -537,3 +537,63 @@ int DRM(rmctx)(struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ffb_driver_release(drm_device_t *dev)
|
||||
{
|
||||
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
|
||||
int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
|
||||
int idx;
|
||||
|
||||
idx = context - 1;
|
||||
if (fpriv &&
|
||||
context != DRM_KERNEL_CONTEXT &&
|
||||
fpriv->hw_state[idx] != NULL) {
|
||||
kfree(fpriv->hw_state[idx]);
|
||||
fpriv->hw_state[idx] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int ffb_driver_presetup(drm_device_t *dev)
{
	int ret;

	ret = ffb_presetup(dev);
	if (ret != 0)
		return ret;
	return 0;
}
|
||||
|
||||
static void ffb_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
if (dev->dev_private) kfree(dev->dev_private);
|
||||
}
|
||||
|
||||
static void ffb_driver_postcleanup(drm_device_t *dev)
|
||||
{
|
||||
if (ffb_position != NULL) kfree(ffb_position);
|
||||
}
|
||||
|
||||
static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
|
||||
{
|
||||
dev->lock.filp = 0;
|
||||
{
|
||||
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
|
||||
unsigned int old, new, prev, ctx;
|
||||
|
||||
ctx = lock.context;
|
||||
do {
|
||||
old = *plock;
|
||||
new = ctx;
|
||||
prev = cmpxchg(plock, old, new);
|
||||
} while (prev != old);
|
||||
}
|
||||
wake_up_interruptible(&dev->lock.lock_queue);
|
||||
}
|
||||
|
||||
static void ffb_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
DRM(fops).get_unmapped_area = ffb_get_unmapped_area;
|
||||
dev->fn_tbl.release = ffb_driver_release;
|
||||
dev->fn_tbl.presetup = ffb_driver_presetup;
|
||||
dev->fn_tbl.pretakedown = ffb_driver_pretakedown;
|
||||
dev->fn_tbl.postcleanup = ffb_driver_postcleanup;
|
||||
dev->fn_tbl.kernel_context_switch = ffb_context_switch;
|
||||
dev->fn_tbl.kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock;
|
||||
}
|
||||
|
|
|
@ -26,53 +26,7 @@
|
|||
#define DRIVER_MINOR 0
|
||||
#define DRIVER_PATCHLEVEL 1
|
||||
|
||||
#define DRIVER_FOPS \
|
||||
static struct file_operations DRM(fops) = { \
|
||||
.owner = THIS_MODULE, \
|
||||
.open = DRM(open), \
|
||||
.flush = DRM(flush), \
|
||||
.release = DRM(release), \
|
||||
.ioctl = DRM(ioctl), \
|
||||
.mmap = DRM(mmap), \
|
||||
.read = DRM(read), \
|
||||
.fasync = DRM(fasync), \
|
||||
.poll = DRM(poll), \
|
||||
.get_unmapped_area = ffb_get_unmapped_area, \
|
||||
}
|
||||
|
||||
#define DRIVER_COUNT_CARDS() ffb_count_card_instances()
|
||||
/* Allocate private structure and fill it */
|
||||
#define DRIVER_PRESETUP() do { \
|
||||
int _ret; \
|
||||
_ret = ffb_presetup(dev); \
|
||||
if (_ret != 0) return _ret; \
|
||||
} while(0)
|
||||
|
||||
/* Free private structure */
|
||||
#define DRIVER_PRETAKEDOWN() do { \
|
||||
if (dev->dev_private) kfree(dev->dev_private); \
|
||||
} while(0)
|
||||
|
||||
#define DRIVER_POSTCLEANUP() do { \
|
||||
if (ffb_position != NULL) kfree(ffb_position); \
|
||||
} while(0)
|
||||
|
||||
/* We have to free up the rogue hw context state holding error or
|
||||
* else we will leak it.
|
||||
*/
|
||||
#define DRIVER_RELEASE() do { \
|
||||
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; \
|
||||
int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); \
|
||||
int idx; \
|
||||
\
|
||||
idx = context - 1; \
|
||||
if (fpriv && \
|
||||
context != DRM_KERNEL_CONTEXT && \
|
||||
fpriv->hw_state[idx] != NULL) { \
|
||||
kfree(fpriv->hw_state[idx]); \
|
||||
fpriv->hw_state[idx] = NULL; \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/* For mmap customization */
|
||||
#define DRIVER_GET_MAP_OFS() (map->offset & 0xffffffff)
|
||||
|
@ -275,11 +229,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static unsigned long ffb_get_unmapped_area(struct file *filp,
|
||||
unsigned long hint,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags)
|
||||
unsigned long ffb_get_unmapped_area(struct file *filp,
|
||||
unsigned long hint,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags)
|
||||
{
|
||||
drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
|
||||
unsigned long addr = -ENOMEM;
|
||||
|
|
|
@ -274,3 +274,10 @@ typedef struct ffb_dev_priv {
|
|||
/* Context table. */
|
||||
struct ffb_hw_context *hw_state[FFB_MAX_CTXS];
|
||||
} ffb_dev_priv_t;
|
||||
|
||||
extern struct file_operations DRM(fops);
|
||||
extern unsigned long ffb_get_unmapped_area(struct file *filp,
|
||||
unsigned long hint,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags);
|
||||
|
|
|
@ -371,15 +371,15 @@ static int i810_dma_initialize(drm_device_t *dev,
|
|||
DRM_ERROR("can not find sarea!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
|
||||
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
|
||||
if (!dev_priv->mmio_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i810_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find mmio map!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
|
||||
if (!dev_priv->buffer_map) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if (!dev->agp_buffer_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i810_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find dma buffer map!\n");
|
||||
|
@ -1394,3 +1394,28 @@ int i810_flip_bufs(struct inode *inode, struct file *filp,
|
|||
i810_dma_dispatch_flip( dev );
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void i810_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
i810_dma_cleanup( dev );
|
||||
}
|
||||
|
||||
static void i810_driver_release(drm_device_t *dev, struct file *filp)
|
||||
{
|
||||
i810_reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
static int i810_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
i810_dma_quiescent( dev );
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i810_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_i810_buf_priv_t);
|
||||
dev->fn_tbl.pretakedown = i810_driver_pretakedown;
|
||||
dev->fn_tbl.release = i810_driver_release;
|
||||
dev->fn_tbl.dma_quiescent = i810_driver_dma_quiescent;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,7 +53,6 @@ typedef struct _drm_i810_ring_buffer{
|
|||
|
||||
typedef struct drm_i810_private {
|
||||
drm_map_t *sarea_map;
|
||||
drm_map_t *buffer_map;
|
||||
drm_map_t *mmio_map;
|
||||
|
||||
drm_i810_sarea_t *sarea_priv;
|
||||
|
|
|
@ -378,15 +378,15 @@ static int i830_dma_initialize(drm_device_t *dev,
|
|||
DRM_ERROR("can not find sarea!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
|
||||
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i830_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find mmio map!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
|
||||
if(!dev_priv->buffer_map) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i830_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find dma buffer map!\n");
|
||||
|
@ -1589,3 +1589,29 @@ int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void i830_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
i830_dma_cleanup( dev );
|
||||
}
|
||||
|
||||
static void i830_driver_release(drm_device_t *dev, struct file *filp)
|
||||
{
|
||||
i830_reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
static int i830_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
i830_dma_quiescent( dev );
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i830_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_i830_buf_priv_t);
|
||||
dev->fn_tbl.pretakedown = i830_driver_pretakedown;
|
||||
dev->fn_tbl.release = i830_driver_release;
|
||||
dev->fn_tbl.dma_quiescent = i830_driver_dma_quiescent;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,7 +53,6 @@ typedef struct _drm_i830_ring_buffer{
|
|||
|
||||
typedef struct drm_i830_private {
|
||||
drm_map_t *sarea_map;
|
||||
drm_map_t *buffer_map;
|
||||
drm_map_t *mmio_map;
|
||||
|
||||
drm_i830_sarea_t *sarea_priv;
|
||||
|
|
|
@ -35,4 +35,6 @@
|
|||
#define SAVAGE_DEFAULT_USEC_TIMEOUT 10000
|
||||
#define SAVAGE_FREELIST_DEBUG 0
|
||||
|
||||
|
||||
void DRM(driver_register_fns)(drm_device_t *dev)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -46,3 +46,5 @@
|
|||
#include "drm_proc.h"
|
||||
#include "drm_vm.h"
|
||||
#include "drm_stub.h"
|
||||
|
||||
|
||||
|
|
|
@ -49,3 +49,8 @@
|
|||
#include "drm_proc.h"
|
||||
#include "drm_vm.h"
|
||||
#include "drm_stub.h"
|
||||
|
||||
void DRM(driver_register_fns)(drm_device_t *dev)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
121 linux/drmP.h
@ -105,12 +105,6 @@
|
|||
#ifndef __HAVE_IRQ
|
||||
#define __HAVE_IRQ 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_WAITLIST
|
||||
#define __HAVE_DMA_WAITLIST 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_FREELIST
|
||||
#define __HAVE_DMA_FREELIST 0
|
||||
#endif
|
||||
|
||||
#define __REALLY_HAVE_AGP (__HAVE_AGP && (defined(CONFIG_AGP) || \
|
||||
defined(CONFIG_AGP_MODULE)))
|
||||
|
@ -224,54 +218,6 @@
|
|||
/*@}*/
|
||||
|
||||
|
||||
/***********************************************************************/
|
||||
/** \name Mapping helper macros */
|
||||
/*@{*/
|
||||
|
||||
#define DRM_IOREMAP(map, dev) \
|
||||
(map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) )
|
||||
|
||||
#define DRM_IOREMAP_NOCACHE(map, dev) \
|
||||
(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev))
|
||||
|
||||
#define DRM_IOREMAPFREE(map, dev) \
|
||||
do { \
|
||||
if ( (map)->handle && (map)->size ) \
|
||||
DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* Find mapping.
|
||||
*
|
||||
* \param _map matching mapping if found, untouched otherwise.
|
||||
* \param _o offset.
|
||||
*
|
||||
* Expects the existence of a local variable named \p dev pointing to the
|
||||
* drm_device structure.
|
||||
*/
|
||||
#define DRM_FIND_MAP(_map, _o) \
|
||||
do { \
|
||||
struct list_head *_list; \
|
||||
list_for_each( _list, &dev->maplist->head ) { \
|
||||
drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head ); \
|
||||
if ( _entry->map && \
|
||||
_entry->map->offset == (_o) ) { \
|
||||
(_map) = _entry->map; \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/**
|
||||
* Drop mapping.
|
||||
*
|
||||
* \sa #DRM_FIND_MAP.
|
||||
*/
|
||||
#define DRM_DROP_MAP(_map)
|
||||
|
||||
/*@}*/
|
||||
|
||||
|
||||
/***********************************************************************/
|
||||
/** \name Internal types and structures */
|
||||
/*@{*/
|
||||
|
@ -569,6 +515,35 @@ typedef struct drm_vbl_sig {
|
|||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* DRM device functions structure
|
||||
*/
|
||||
struct drm_device;
|
||||
|
||||
struct drm_driver_fn {
|
||||
int (*preinit)(struct drm_device *, unsigned long flags);
|
||||
int (*postinit)(struct drm_device *, unsigned long flags);
|
||||
void (*prerelease)(struct drm_device *, struct file *filp);
|
||||
void (*pretakedown)(struct drm_device *);
|
||||
int (*postcleanup)(struct drm_device *);
|
||||
int (*presetup)(struct drm_device *);
|
||||
int (*postsetup)(struct drm_device *);
|
||||
void (*open_helper)(struct drm_device *, drm_file_t *);
|
||||
void (*release)(struct drm_device *, struct file *filp);
|
||||
void (*dma_ready)(struct drm_device *);
|
||||
int (*dma_quiescent)(struct drm_device *);
|
||||
int (*dma_flush_block_and_flush)(struct drm_device *, int context, drm_lock_flags_t flags);
|
||||
int (*dma_flush_unblock)(struct drm_device *, int context, drm_lock_flags_t flags);
|
||||
int (*context_ctor)(struct drm_device *dev, int context);
|
||||
int (*context_dtor)(struct drm_device *dev, int context);
|
||||
int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
|
||||
int (*kernel_context_switch_unlock)(struct drm_device *dev);
|
||||
int (*dma_schedule)(struct drm_device *dev, int locked);
|
||||
int (*waitlist_destroy)(drm_waitlist_t *bl);
|
||||
int (*freelist_create)(drm_freelist_t *bl, int count);
|
||||
int (*freelist_put)(struct drm_device *dev, drm_freelist_t *bl, drm_buf_t *buf);
|
||||
int (*freelist_destroy)(drm_freelist_t *bl);
|
||||
};
|
||||
|
||||
/**
|
||||
* DRM device structure.
|
||||
|
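The table above replaces the old compile-time DRIVER_* customization macros with per-device runtime hooks; any slot left NULL is simply skipped by the core. As a rough sketch of the driver side (the foo_* names are hypothetical, for illustration only), a converted driver's registration entry point ends up looking like this:

/* Illustrative only: a minimal driver filling in just the hooks it needs. */
static void foo_driver_pretakedown(struct drm_device *dev)
{
	/* free per-device state before the core finishes the takedown */
	if (dev->dev_private) {
		kfree(dev->dev_private);
		dev->dev_private = NULL;
	}
}

void DRM(driver_register_fns)(struct drm_device *dev)
{
	dev->dev_priv_size = sizeof(u32);	/* per-buffer private block size */
	dev->fn_tbl.pretakedown = foo_driver_pretakedown;
	/* every other hook stays NULL and is treated as a no-op */
}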
@ -704,8 +679,12 @@ typedef struct drm_device {
|
|||
sigset_t sigmask;
|
||||
|
||||
int need_reset; /**< secondary device needing reset */
|
||||
struct drm_driver_fn fn_tbl;
|
||||
drm_local_map_t *agp_buffer_map;
|
||||
int dev_priv_size;
|
||||
} drm_device_t;
|
||||
|
||||
extern void DRM(driver_register_fns)(struct drm_device *dev);
|
||||
|
||||
/******************************************************************/
|
||||
/** \name Internal function definitions */
|
||||
|
@ -960,6 +939,40 @@ extern void *DRM(pci_alloc)(drm_device_t *dev, size_t size,
|
|||
extern void DRM(pci_free)(drm_device_t *dev, size_t size,
|
||||
void *vaddr, dma_addr_t busaddr);
|
||||
|
||||
|
||||
/* Inline replacements for DRM_IOREMAP macros */
|
||||
static __inline__ void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
|
||||
{
|
||||
map->handle = DRM(ioremap)( map->offset, map->size, dev );
|
||||
}
|
||||
|
||||
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, struct drm_device *dev)
|
||||
{
|
||||
map->handle = DRM(ioremap_nocache)(map->offset, map->size, dev);
|
||||
}
|
||||
|
||||
static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
|
||||
{
|
||||
if ( map->handle && map->size )
|
||||
DRM(ioremapfree)( map->handle, map->size, dev );
|
||||
}
|
||||
|
||||
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
|
||||
{
|
||||
struct list_head *_list;
|
||||
list_for_each( _list, &dev->maplist->head ) {
|
||||
drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
|
||||
if ( _entry->map &&
|
||||
_entry->map->offset == offset ) {
|
||||
return _entry->map;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static __inline__ void drm_core_dropmap(struct drm_map *map)
|
||||
{
|
||||
}
|
||||
/*@}*/
|
||||
|
||||
#endif /* __KERNEL__ */
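These inlines are the replacements for the DRM_IOREMAP, DRM_IOREMAPFREE and DRM_FIND_MAP macros; drm_core_findmap() returns the map (or NULL) instead of assigning through a macro argument, so callers can check the result. A hedged sketch of the per-driver conversion (foo_map_buffers() is a made-up helper, not part of this patch):

/* Sketch only: locate and ioremap the DMA buffer aperture the new way.
 * Returns 0 on success or a negative errno. */
static int foo_map_buffers(struct drm_device *dev, unsigned long buffers_offset)
{
	dev->agp_buffer_map = drm_core_findmap(dev, buffers_offset);
	if (!dev->agp_buffer_map)
		return -EINVAL;			/* was a silent DRM_FIND_MAP() */

	drm_core_ioremap(dev->agp_buffer_map, dev);	/* was DRM_IOREMAP() */
	if (!dev->agp_buffer_map->handle)
		return -ENOMEM;
	return 0;
}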
|
||||
|
|
|
@ -45,18 +45,6 @@
|
|||
#define __HAVE_SG 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_BUF_PRIV_T
|
||||
#define DRIVER_BUF_PRIV_T u32
|
||||
#endif
|
||||
#ifndef DRIVER_AGP_BUFFERS_MAP
|
||||
#if __HAVE_AGP && __HAVE_DMA
|
||||
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
|
||||
#else
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* Compute size order. Returns the exponent of the smaller power of two which
|
||||
* is greater or equal to given number.
|
||||
|
@ -316,7 +304,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
|
|||
*
|
||||
* Frees any pages and buffers associated with the given entry.
|
||||
*/
|
||||
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
|
||||
static void DRM(cleanup_buf_error)(drm_device_t *dev, drm_buf_entry_t *entry)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -349,9 +337,8 @@ static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
|
|||
sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS);
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_destroy)(&entry->freelist);
|
||||
#endif
|
||||
if (dev->fn_tbl.freelist_destroy)
|
||||
dev->fn_tbl.freelist_destroy(&entry->freelist);
|
||||
|
||||
entry->buf_count = 0;
|
||||
}
|
||||
|
@ -474,13 +461,13 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
|||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)( buf->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
if(!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -504,7 +491,7 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_BUFS );
|
||||
if(!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -521,12 +508,14 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
|
||||
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
if (dev->fn_tbl.freelist_create)
|
||||
{
|
||||
dev->fn_tbl.freelist_create( &entry->freelist, entry->buf_count);
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dev->fn_tbl.freelist_put( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
|
@ -665,7 +654,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
entry->seg_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
DRM(free)( temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
|
@ -699,14 +688,14 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)( dev->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
if(!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
entry->seg_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
DRM(free)( temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
|
@ -730,7 +719,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_BUFS );
|
||||
if (!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
DRM(free)( temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
|
@ -760,12 +749,14 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
|||
dma->page_count += entry->seg_count << page_order;
|
||||
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
if (dev->fn_tbl.freelist_create)
|
||||
{
|
||||
dev->fn_tbl.freelist_create( &entry->freelist, entry->buf_count);
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dev->fn_tbl.freelist_put( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
|
@ -883,13 +874,13 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
|
|||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
buf->dev_priv_size = dev->dev_priv_size;
|
||||
buf->dev_private = DRM(alloc)( dev->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
if(!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -914,7 +905,7 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_BUFS );
|
||||
if(!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
DRM(cleanup_buf_error)(entry);
|
||||
DRM(cleanup_buf_error)(dev,entry);
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
|
@ -931,12 +922,14 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
|
||||
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
if (dev->fn_tbl.freelist_create)
|
||||
{
|
||||
dev->fn_tbl.freelist_create( &entry->freelist, entry->buf_count);
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dev->fn_tbl.freelist_put( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
|
@ -1222,7 +1215,7 @@ int DRM(mapbufs)( struct inode *inode, struct file *filp,
|
|||
if ( request.count >= dma->buf_count ) {
|
||||
if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
|
||||
(__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
|
||||
drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
|
||||
drm_map_t *map = dev->agp_buffer_map;
|
||||
|
||||
if ( !map ) {
|
||||
retcode = -EINVAL;
|
||||
|
|
|
@ -420,10 +420,13 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
|
|||
/* Should this return -EBUSY instead? */
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef DRIVER_CTX_CTOR
|
||||
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT )
|
||||
DRIVER_CTX_CTOR(dev, ctx.handle);
|
||||
#endif
|
||||
{
|
||||
if (dev->fn_tbl.context_ctor)
|
||||
dev->fn_tbl.context_ctor(dev, ctx.handle);
|
||||
}
|
||||
|
||||
ctx_entry = DRM(alloc)( sizeof(*ctx_entry), DRM_MEM_CTXLIST );
|
||||
if ( !ctx_entry ) {
|
||||
DRM_DEBUG("out of memory\n");
|
||||
|
@ -555,9 +558,8 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
|
|||
priv->remove_auth_on_close = 1;
|
||||
}
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
|
||||
#ifdef DRIVER_CTX_DTOR
|
||||
DRIVER_CTX_DTOR(dev, ctx.handle);
|
||||
#endif
|
||||
if (dev->fn_tbl.context_dtor)
dev->fn_tbl.context_dtor(dev, ctx.handle);
|
||||
DRM(ctxbitmap_free)( dev, ctx.handle );
|
||||
}
|
||||
|
||||
|
|
|
@ -117,9 +117,9 @@ void DRM(dma_takedown)(drm_device_t *dev)
|
|||
dma->bufs[i].buf_count *
|
||||
sizeof(*dma->bufs[0].buflist),
|
||||
DRM_MEM_BUFS);
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_destroy)(&dma->bufs[i].freelist);
|
||||
#endif
|
||||
|
||||
if (dev->fn_tbl.freelist_destroy)
|
||||
dev->fn_tbl.freelist_destroy(&dma->bufs[i].freelist);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -159,16 +159,13 @@ void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
|
|||
if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
|
||||
wake_up_interruptible(&buf->dma_wait);
|
||||
}
|
||||
#if __HAVE_DMA_FREELIST
|
||||
else {
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
/* If processes are waiting, the last one
|
||||
to wake will put the buffer on the free
|
||||
list. If no processes are waiting, we
|
||||
put the buffer on the freelist here. */
|
||||
DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
|
||||
}
|
||||
#endif
|
||||
/* If processes are waiting, the last one
|
||||
to wake will put the buffer on the free
|
||||
list. If no processes are waiting, we
|
||||
put the buffer on the freelist here. */
|
||||
else if (dev->fn_tbl.freelist_put)
|
||||
dev->fn_tbl.freelist_put(dev, &dev->dma->bufs[buf->order].freelist, buf);
|
||||
|
||||
}
|
||||
|
||||
#if !__HAVE_DMA_RECLAIM
|
||||
|
|
227 linux/drm_drv.h
@ -67,82 +67,16 @@
|
|||
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
|
||||
#define __HAVE_MULTIPLE_DMA_QUEUES 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_SCHEDULE
|
||||
#define __HAVE_DMA_SCHEDULE 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_FLUSH
|
||||
#define __HAVE_DMA_FLUSH 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_READY
|
||||
#define __HAVE_DMA_READY 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_QUIESCENT
|
||||
#define __HAVE_DMA_QUIESCENT 0
|
||||
#endif
|
||||
#ifndef __HAVE_RELEASE
|
||||
#define __HAVE_RELEASE 0
|
||||
#endif
|
||||
#ifndef __HAVE_COUNTERS
|
||||
#define __HAVE_COUNTERS 0
|
||||
#endif
|
||||
#ifndef __HAVE_SG
|
||||
#define __HAVE_SG 0
|
||||
#endif
|
||||
/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm modules in
|
||||
* the DRI cvs tree, but it is required by the kernel tree's sparc
|
||||
* driver.
|
||||
*/
|
||||
#ifndef __HAVE_KERNEL_CTX_SWITCH
|
||||
#define __HAVE_KERNEL_CTX_SWITCH 0
|
||||
#endif
|
||||
#ifndef __HAVE_DRIVER_FOPS_READ
|
||||
#define __HAVE_DRIVER_FOPS_READ 0
|
||||
#endif
|
||||
#ifndef __HAVE_DRIVER_FOPS_POLL
|
||||
#define __HAVE_DRIVER_FOPS_POLL 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_PREINIT
|
||||
#define DRIVER_PREINIT(dev, flags) 0
|
||||
#endif
|
||||
#ifndef DRIVER_POSTINIT
|
||||
#define DRIVER_POSTINIT(dev, flags) 0
|
||||
#endif
|
||||
#ifndef DRIVER_PRERELEASE
|
||||
#define DRIVER_PRERELEASE()
|
||||
#endif
|
||||
#ifndef DRIVER_PRETAKEDOWN
|
||||
#define DRIVER_PRETAKEDOWN(dev)
|
||||
#endif
|
||||
#ifndef DRIVER_POSTCLEANUP
|
||||
#define DRIVER_POSTCLEANUP(dev)
|
||||
#endif
|
||||
#ifndef DRIVER_PRESETUP
|
||||
#define DRIVER_PRESETUP()
|
||||
#endif
|
||||
#ifndef DRIVER_POSTSETUP
|
||||
#define DRIVER_POSTSETUP()
|
||||
#endif
|
||||
#ifndef DRIVER_IOCTLS
|
||||
#define DRIVER_IOCTLS
|
||||
#endif
|
||||
#ifndef DRIVER_OPEN_HELPER
|
||||
#define DRIVER_OPEN_HELPER( priv, dev )
|
||||
#endif
|
||||
#ifndef DRIVER_FOPS
|
||||
#define DRIVER_FOPS \
|
||||
struct file_operations DRM(fops) = { \
|
||||
.owner = THIS_MODULE, \
|
||||
.open = DRM(open), \
|
||||
.flush = DRM(flush), \
|
||||
.release = DRM(release), \
|
||||
.ioctl = DRM(ioctl), \
|
||||
.mmap = DRM(mmap), \
|
||||
.fasync = DRM(fasync), \
|
||||
.poll = DRM(poll), \
|
||||
.read = DRM(read), \
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __exit drm_cleanup( drm_device_t *dev );
|
||||
|
||||
|
@ -180,10 +114,20 @@ drm_device_t DRM(device)[MAX_DEVICES];
|
|||
int DRM(numdevs) = 0;
|
||||
int DRM(fb_loaded) = 0;
|
||||
|
||||
DRIVER_FOPS;
|
||||
struct file_operations DRM(fops) = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = DRM(open),
|
||||
.flush = DRM(flush),
|
||||
.release = DRM(release),
|
||||
.ioctl = DRM(ioctl),
|
||||
.mmap = DRM(mmap),
|
||||
.fasync = DRM(fasync),
|
||||
.poll = DRM(poll),
|
||||
.read = DRM(read),
|
||||
};
|
||||
|
||||
/** Ioctl table */
|
||||
static drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||
drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
|
||||
|
@ -222,12 +166,7 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
|
|||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
/* Gamma only, really */
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
|
||||
#else
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
||||
|
@ -279,7 +218,9 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
{
|
||||
int i;
|
||||
|
||||
DRIVER_PRESETUP();
|
||||
if (dev->fn_tbl.presetup)
|
||||
dev->fn_tbl.presetup(dev);
|
||||
|
||||
atomic_set( &dev->ioctl_count, 0 );
|
||||
atomic_set( &dev->vma_count, 0 );
|
||||
dev->buf_use = 0;
|
||||
|
@ -325,9 +266,6 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
#ifdef __HAVE_COUNTER14
|
||||
dev->types[14] = __HAVE_COUNTER14;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER15
|
||||
dev->types[15] = __HAVE_COUNTER15;
|
||||
#endif
|
||||
|
||||
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
|
||||
atomic_set( &dev->counts[i], 0 );
|
||||
|
@ -385,7 +323,9 @@ static int DRM(setup)( drm_device_t *dev )
|
|||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
DRIVER_POSTSETUP();
|
||||
if (dev->fn_tbl.postsetup)
|
||||
dev->fn_tbl.postsetup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -410,7 +350,8 @@ static int DRM(takedown)( drm_device_t *dev )
|
|||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
DRIVER_PRETAKEDOWN(dev);
|
||||
if (dev->fn_tbl.pretakedown)
|
||||
dev->fn_tbl.pretakedown(dev);
|
||||
#if __HAVE_IRQ
|
||||
if ( dev->irq_enabled ) DRM(irq_uninstall)( dev );
|
||||
#endif
|
||||
|
@ -523,9 +464,9 @@ static int DRM(takedown)( drm_device_t *dev )
|
|||
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
|
||||
if ( dev->queuelist ) {
|
||||
for ( i = 0 ; i < dev->queue_count ; i++ ) {
|
||||
#if __HAVE_DMA_WAITLIST
|
||||
DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
|
||||
#endif
|
||||
if (dev->fn_tbl.waitlist_destroy)
|
||||
dev->fn_tbl.waitlist_destroy( &dev->queuelist[i]->waitlist);
|
||||
|
||||
if ( dev->queuelist[i] ) {
|
||||
DRM(free)( dev->queuelist[i],
|
||||
sizeof(*dev->queuelist[0]),
|
||||
|
@ -596,8 +537,13 @@ static int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev->pci_func = PCI_FUNC(pdev->devfn);
|
||||
dev->irq = pdev->irq;
|
||||
|
||||
if ((retcode = DRIVER_PREINIT(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
/* dev_priv_size can be changed by a driver in driver_register_fns */
|
||||
dev->dev_priv_size = sizeof(u32);
|
||||
DRM(driver_register_fns)(dev);
|
||||
|
||||
if (dev->fn_tbl.preinit)
|
||||
if ((retcode = dev->fn_tbl.preinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
dev->agp = DRM(agp_init)();
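Note the ordering set up here: the core seeds dev_priv_size with sizeof(u32) and calls DRM(driver_register_fns) before any hook can fire, so the optional preinit callback already sees whatever the driver installed. A short sketch of such a hook (hypothetical foo_* name, not part of this patch):

/* Sketch only: a once-per-device early hook.  It would be assigned to
 * dev->fn_tbl.preinit from the driver's DRM(driver_register_fns). */
static int foo_driver_preinit(struct drm_device *dev, unsigned long flags)
{
	/* runs from drm_probe(), before AGP init and before DRM(setup);
	 * returning nonzero aborts the probe via error_out_unreg */
	return 0;
}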
|
||||
|
@ -643,9 +589,11 @@ static int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
dev->minor,
|
||||
pci_pretty_name(pdev)
|
||||
);
|
||||
|
||||
/* drivers add secondary heads here if needed */
|
||||
if ((retcode = DRIVER_POSTINIT(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
if (dev->fn_tbl.postinit)
|
||||
if ((retcode = dev->fn_tbl.postinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -765,7 +713,8 @@ static void __exit drm_cleanup( drm_device_t *dev )
|
|||
dev->agp = NULL;
|
||||
}
|
||||
#endif
|
||||
DRIVER_POSTCLEANUP(dev);
|
||||
if (dev->fn_tbl.postcleanup)
|
||||
dev->fn_tbl.postcleanup(dev);
|
||||
}
|
||||
|
||||
static void __exit drm_exit (void)
|
||||
|
@ -901,7 +850,8 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
DRIVER_PRERELEASE();
|
||||
if (dev->fn_tbl.prerelease)
|
||||
dev->fn_tbl.prerelease(dev, filp);
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
|
@ -916,9 +866,10 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
DRM_DEBUG( "File %p released, freeing lock for context %d\n",
|
||||
filp,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
#if __HAVE_RELEASE
|
||||
DRIVER_RELEASE();
|
||||
#endif
|
||||
|
||||
if (dev->fn_tbl.release)
|
||||
dev->fn_tbl.release(dev, filp);
|
||||
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
|
@ -927,8 +878,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
#if __HAVE_RELEASE
|
||||
else if ( priv->lock_count && dev->lock.hw_lock ) {
|
||||
else if ( dev->fn_tbl.release && priv->lock_count && dev->lock.hw_lock ) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
|
||||
|
@ -957,12 +907,14 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
if( !retcode ) {
|
||||
DRIVER_RELEASE();
|
||||
if (dev->fn_tbl.release)
|
||||
dev->fn_tbl.release(dev, filp);
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
}
|
||||
}
|
||||
#elif __HAVE_DMA
|
||||
|
||||
#if __HAVE_DMA
|
||||
DRM(reclaim_buffers)( filp );
|
||||
#endif
|
||||
|
||||
|
@ -975,9 +927,8 @@ int DRM(release)( struct inode *inode, struct file *filp )
|
|||
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
|
||||
if ( pos->tag == priv &&
|
||||
pos->handle != DRM_KERNEL_CONTEXT ) {
|
||||
#ifdef DRIVER_CTX_DTOR
|
||||
DRIVER_CTX_DTOR( dev, pos->handle);
|
||||
#endif
|
||||
if (dev->fn_tbl.context_dtor)
|
||||
dev->fn_tbl.context_dtor(dev, pos->handle);
|
||||
#if __HAVE_CTX_BITMAP
|
||||
DRM(ctxbitmap_free)( dev, pos->handle );
|
||||
#endif
|
||||
|
@ -1134,9 +1085,8 @@ int DRM(lock)( struct inode *inode, struct file *filp,
|
|||
q = dev->queuelist[lock.context];
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
|
||||
#endif
|
||||
if (dev->fn_tbl.dma_flush_block_and_flush)
|
||||
ret = dev->fn_tbl.dma_flush_block_and_flush(dev, lock.context, lock.flags);
|
||||
if ( !ret ) {
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
|
@ -1165,9 +1115,8 @@ int DRM(lock)( struct inode *inode, struct file *filp,
|
|||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
|
||||
#endif
|
||||
if (dev->fn_tbl.dma_flush_unblock)
|
||||
dev->fn_tbl.dma_flush_unblock(dev, lock.context, lock.flags);
|
||||
|
||||
if ( !ret ) {
|
||||
sigemptyset( &dev->sigmask );
|
||||
|
@ -1180,26 +1129,17 @@ int DRM(lock)( struct inode *inode, struct file *filp,
|
|||
block_all_signals( DRM(notifier),
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
|
||||
#if __HAVE_DMA_READY
|
||||
if ( lock.flags & _DRM_LOCK_READY ) {
|
||||
DRIVER_DMA_READY();
|
||||
if (dev->fn_tbl.dma_ready && (lock.flags & _DRM_LOCK_READY))
|
||||
dev->fn_tbl.dma_ready(dev);
|
||||
|
||||
if ( dev->fn_tbl.dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
|
||||
return dev->fn_tbl.dma_quiescent(dev);
|
||||
|
||||
|
||||
if ( dev->fn_tbl.kernel_context_switch && dev->last_context != lock.context ) {
|
||||
dev->fn_tbl.kernel_context_switch(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
#endif
|
||||
#if __HAVE_DMA_QUIESCENT
|
||||
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
||||
DRIVER_DMA_QUIESCENT();
|
||||
}
|
||||
#endif
|
||||
/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the
|
||||
* drm modules in the DRI cvs tree, but it is required
|
||||
* by the Sparc driver.
|
||||
*/
|
||||
#if __HAVE_KERNEL_CTX_SWITCH
|
||||
if ( dev->last_context != lock.context ) {
|
||||
DRM(context_switch)(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
|
||||
|
@ -1236,40 +1176,21 @@ int DRM(unlock)( struct inode *inode, struct file *filp,
|
|||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
|
||||
|
||||
/* __HAVE_KERNEL_CTX_SWITCH isn't used by any of the drm
|
||||
* modules in the DRI cvs tree, but it is required by the
|
||||
* Sparc driver.
|
||||
*/
|
||||
#if __HAVE_KERNEL_CTX_SWITCH
|
||||
/* We no longer really hold it, but if we are the next
|
||||
* agent to request it then we should just be able to
|
||||
* take it immediately and not eat the ioctl.
|
||||
*/
|
||||
dev->lock.filp = 0;
|
||||
if (dev->fn_tbl.kernel_context_switch_unlock)
|
||||
dev->fn_tbl.kernel_context_switch_unlock(dev);
|
||||
else
|
||||
{
|
||||
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
|
||||
unsigned int old, new, prev, ctx;
|
||||
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
|
||||
ctx = lock.context;
|
||||
do {
|
||||
old = *plock;
|
||||
new = ctx;
|
||||
prev = cmpxchg(plock, old, new);
|
||||
} while (prev != old);
|
||||
}
|
||||
wake_up_interruptible(&dev->lock.lock_queue);
|
||||
#else
|
||||
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
#if __HAVE_DMA_SCHEDULE
|
||||
DRM(dma_schedule)( dev, 1 );
|
||||
#endif
|
||||
if (dev->fn_tbl.dma_schedule)
|
||||
dev->fn_tbl.dma_schedule(dev, 1);
|
||||
|
||||
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
}
|
||||
}
|
||||
#endif /* !__HAVE_KERNEL_CTX_SWITCH */
|
||||
|
||||
unblock_all_signals();
|
||||
return 0;
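Whether it happens inline (the old __HAVE_KERNEL_CTX_SWITCH branch above) or inside a driver's kernel_context_switch_unlock hook, the unlock trick is the same: the cmpxchg loop stores the bare context number back into the hardware lock word, so the lock reads as free while still naming its previous owner, which is what lets that owner re-take it "immediately and not eat the ioctl". In terms of the lock-word helpers from drm.h (sketch only, not part of this patch):

/* Lock-word layout (drm.h): top bit is _DRM_LOCK_HELD, low bits the context. */
unsigned int word = dev->lock.hw_lock->lock;

if (!_DRM_LOCK_IS_HELD(word))
	DRM_DEBUG("lock free, still tagged for context %d\n",
		  _DRM_LOCKING_CONTEXT(word));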
|
||||
|
|
|
@ -73,7 +73,8 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev)
|
|||
priv->authenticated = capable(CAP_SYS_ADMIN);
|
||||
priv->lock_count = 0;
|
||||
|
||||
DRIVER_OPEN_HELPER( priv, dev );
|
||||
if (dev->fn_tbl.open_helper)
|
||||
dev->fn_tbl.open_helper(dev, priv);
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (!dev->file_last) {
|
||||
|
@ -131,19 +132,15 @@ int DRM(fasync)(int fd, struct file *filp, int on)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if !__HAVE_DRIVER_FOPS_POLL
|
||||
/** No-op. */
|
||||
unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if !__HAVE_DRIVER_FOPS_READ
|
||||
/** No-op. */
|
||||
ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -199,6 +199,7 @@ static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *d
|
|||
iounmap(pt);
|
||||
}
|
||||
|
||||
|
||||
#if DEBUG_MEMORY
|
||||
#include "drm_memory_debug.h"
|
||||
#else
|
||||
|
|
|
@ -8,9 +8,5 @@
|
|||
*/
|
||||
#define DRM(x) ffb_##x
|
||||
|
||||
/* General customization:
|
||||
*/
|
||||
#define __HAVE_KERNEL_CTX_SWITCH 1
|
||||
#define __HAVE_RELEASE 1
|
||||
#endif
|
||||
|
||||
|
|
|
@ -537,3 +537,63 @@ int DRM(rmctx)(struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ffb_driver_release(drm_device_t *dev, struct file *filp)
|
||||
{
|
||||
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
|
||||
int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
|
||||
int idx;
|
||||
|
||||
idx = context - 1;
|
||||
if (fpriv &&
|
||||
context != DRM_KERNEL_CONTEXT &&
|
||||
fpriv->hw_state[idx] != NULL) {
|
||||
kfree(fpriv->hw_state[idx]);
|
||||
fpriv->hw_state[idx] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int ffb_driver_presetup(drm_device_t *dev)
{
int ret;
ret = ffb_presetup(dev);
if (ret != 0) return ret;
return 0;
}
|
||||
|
||||
static void ffb_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
if (dev->dev_private) kfree(dev->dev_private);
|
||||
}
|
||||
|
||||
static int ffb_driver_postcleanup(drm_device_t *dev)
{
if (ffb_position != NULL) kfree(ffb_position);
return 0;
}
|
||||
|
||||
static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
{
dev->lock.filp = 0;
{
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
unsigned int old, new, prev, ctx;

/* this hook receives no context argument, so use the context the core
 * last switched to in place of lock.context from the generic path */
ctx = dev->last_context;
do {
old = *plock;
new = ctx;
prev = cmpxchg(plock, old, new);
} while (prev != old);
}
wake_up_interruptible(&dev->lock.lock_queue);
return 0;
}
|
||||
|
||||
static void ffb_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
DRM(fops).get_unmapped_area = ffb_get_unmapped_area;
|
||||
dev->fn_tbl.release = ffb_driver_release;
|
||||
dev->fn_tbl.presetup = ffb_driver_presetup;
|
||||
dev->fn_tbl.pretakedown = ffb_driver_pretakedown;
|
||||
dev->fn_tbl.postcleanup = ffb_driver_postcleanup;
|
||||
dev->fn_tbl.kernel_context_switch = ffb_context_switch;
|
||||
dev->fn_tbl.kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock;
|
||||
}
|
||||
|
|
|
@ -26,53 +26,7 @@
|
|||
#define DRIVER_MINOR 0
|
||||
#define DRIVER_PATCHLEVEL 1
|
||||
|
||||
#define DRIVER_FOPS \
|
||||
static struct file_operations DRM(fops) = { \
|
||||
.owner = THIS_MODULE, \
|
||||
.open = DRM(open), \
|
||||
.flush = DRM(flush), \
|
||||
.release = DRM(release), \
|
||||
.ioctl = DRM(ioctl), \
|
||||
.mmap = DRM(mmap), \
|
||||
.read = DRM(read), \
|
||||
.fasync = DRM(fasync), \
|
||||
.poll = DRM(poll), \
|
||||
.get_unmapped_area = ffb_get_unmapped_area, \
|
||||
}
|
||||
|
||||
#define DRIVER_COUNT_CARDS() ffb_count_card_instances()
|
||||
/* Allocate private structure and fill it */
|
||||
#define DRIVER_PRESETUP() do { \
|
||||
int _ret; \
|
||||
_ret = ffb_presetup(dev); \
|
||||
if (_ret != 0) return _ret; \
|
||||
} while(0)
|
||||
|
||||
/* Free private structure */
|
||||
#define DRIVER_PRETAKEDOWN() do { \
|
||||
if (dev->dev_private) kfree(dev->dev_private); \
|
||||
} while(0)
|
||||
|
||||
#define DRIVER_POSTCLEANUP() do { \
|
||||
if (ffb_position != NULL) kfree(ffb_position); \
|
||||
} while(0)
|
||||
|
||||
/* We have to free up the rogue hw context state holding error or
|
||||
* else we will leak it.
|
||||
*/
|
||||
#define DRIVER_RELEASE() do { \
|
||||
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; \
|
||||
int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); \
|
||||
int idx; \
|
||||
\
|
||||
idx = context - 1; \
|
||||
if (fpriv && \
|
||||
context != DRM_KERNEL_CONTEXT && \
|
||||
fpriv->hw_state[idx] != NULL) { \
|
||||
kfree(fpriv->hw_state[idx]); \
|
||||
fpriv->hw_state[idx] = NULL; \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/* For mmap customization */
|
||||
#define DRIVER_GET_MAP_OFS() (map->offset & 0xffffffff)
|
||||
|
@ -275,11 +229,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static unsigned long ffb_get_unmapped_area(struct file *filp,
|
||||
unsigned long hint,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags)
|
||||
unsigned long ffb_get_unmapped_area(struct file *filp,
|
||||
unsigned long hint,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags)
|
||||
{
|
||||
drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
|
||||
unsigned long addr = -ENOMEM;
|
||||
|
|
|
@ -274,3 +274,10 @@ typedef struct ffb_dev_priv {
|
|||
/* Context table. */
|
||||
struct ffb_hw_context *hw_state[FFB_MAX_CTXS];
|
||||
} ffb_dev_priv_t;
|
||||
|
||||
extern struct file_operations DRM(fops);
|
||||
extern unsigned long ffb_get_unmapped_area(struct file *filp,
|
||||
unsigned long hint,
|
||||
unsigned long len,
|
||||
unsigned long pgoff,
|
||||
unsigned long flags);
|
||||
|
|
|
@ -63,12 +63,6 @@
|
|||
#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL
|
||||
#define __HAVE_COUNTER10 _DRM_STAT_MISSED
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define DRIVER_PRETAKEDOWN( dev ) do { \
|
||||
gamma_do_cleanup_dma( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
#define __HAVE_DMA 1
|
||||
|
@ -77,38 +71,10 @@
|
|||
#define __HAVE_OLD_DMA 1
|
||||
#define __HAVE_PCI_DMA 1
|
||||
|
||||
#define __HAVE_DRIVER_FOPS_READ 1
|
||||
#define __HAVE_DRIVER_FOPS_POLL 1
|
||||
|
||||
#define __HAVE_MULTIPLE_DMA_QUEUES 1
|
||||
#define __HAVE_DMA_WAITQUEUE 1
|
||||
|
||||
#define __HAVE_DMA_WAITLIST 1
|
||||
#define __HAVE_DMA_FREELIST 1
|
||||
|
||||
#define __HAVE_DMA_FLUSH 1
|
||||
#define __HAVE_DMA_SCHEDULE 1
|
||||
|
||||
#define __HAVE_DMA_READY 1
|
||||
#define DRIVER_DMA_READY() do { \
|
||||
gamma_dma_ready(dev); \
|
||||
} while (0)
|
||||
|
||||
#define __HAVE_DMA_QUIESCENT 1
|
||||
#define DRIVER_DMA_QUIESCENT() do { \
|
||||
drm_gamma_private_t *dev_priv = \
|
||||
(drm_gamma_private_t *)dev->dev_private; \
|
||||
if (dev_priv->num_rast == 2) \
|
||||
gamma_dma_quiescent_dual(dev); \
|
||||
else gamma_dma_quiescent_single(dev); \
|
||||
return 0; \
|
||||
} while (0)
|
||||
|
||||
#define __HAVE_IRQ 1
|
||||
#define __HAVE_IRQ_BH 1
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_gamma_private_t *)((dev)->dev_private))->buffers
|
||||
|
||||
|
||||
#endif /* __GAMMA_H__ */
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
|
||||
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
|
||||
|
||||
ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
|
||||
ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
|
@ -128,7 +128,7 @@ int DRM(write_string)(drm_device_t *dev, const char *s)
|
|||
return 0;
|
||||
}
|
||||
|
||||
unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait)
|
||||
unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
|
|
|
@ -646,12 +646,12 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
|
||||
DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
|
||||
DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
|
||||
DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );
|
||||
|
||||
|
||||
dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
|
||||
dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
|
||||
dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
|
||||
dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
|
||||
|
||||
dev_priv->sarea_priv = (drm_gamma_sarea_t *)
|
||||
((u8 *)dev_priv->sarea->handle +
|
||||
init->sarea_priv_offset);
|
||||
|
@ -668,9 +668,8 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
|
|||
|
||||
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
|
||||
} else {
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev);
|
||||
|
||||
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
|
||||
pgt = buf->address;
|
||||
|
@ -706,10 +705,9 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
|
|||
#endif
|
||||
|
||||
if ( dev->dev_private ) {
|
||||
drm_gamma_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ( dev_priv->buffers != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
if ( dev->agp_buffer_map != NULL )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
|
||||
DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
|
||||
DRM_MEM_DRIVER );
|
||||
|
@ -911,3 +909,49 @@ void DRM(driver_irq_uninstall)( drm_device_t *dev ) {
|
|||
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
|
||||
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
|
||||
}
|
||||
|
||||
extern drm_ioctl_desc_t DRM(ioctls)[];
|
||||
|
||||
static int gamma_driver_preinit(drm_device_t *dev, unsigned long flags)
|
||||
{
|
||||
/* reset the finish ioctl */
|
||||
DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gamma_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
gamma_do_cleanup_dma(dev);
|
||||
}
|
||||
|
||||
static void gamma_driver_dma_ready(drm_device_t *dev)
|
||||
{
|
||||
gamma_dma_ready(dev);
|
||||
}
|
||||
|
||||
static int gamma_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
drm_gamma_private_t *dev_priv = (drm_gamma_private_t *)dev->dev_private;
|
||||
if (dev_priv->num_rast == 2)
|
||||
gamma_dma_quiescent_dual(dev);
|
||||
else gamma_dma_quiescent_single(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void gamma_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
DRM(fops).read = gamma_fops_read;
|
||||
DRM(fops).poll = gamma_fops_poll;
|
||||
dev->fn_tbl.preinit = gamma_driver_preinit;
|
||||
dev->fn_tbl.pretakedown = gamma_driver_pretakedown;
|
||||
dev->fn_tbl.dma_ready = gamma_driver_dma_ready;
|
||||
dev->fn_tbl.dma_quiescent = gamma_driver_dma_quiescent;
|
||||
dev->fn_tbl.dma_flush_block_and_flush = gamma_flush_block_and_flush;
|
||||
dev->fn_tbl.dma_flush_unblock = gamma_flush_unblock;
|
||||
dev->fn_tbl.dma_schedule = gamma_dma_schedule;
|
||||
dev->fn_tbl.waitlist_destroy = gamma_waitlist_destroy;
|
||||
dev->fn_tbl.freelist_create = gamma_freelist_create;
|
||||
dev->fn_tbl.freelist_put = gamma_freelist_put;
|
||||
dev->fn_tbl.freelist_destroy = gamma_freelist_destroy;
|
||||
}
|
||||
|
|
|
@ -35,7 +35,6 @@
|
|||
typedef struct drm_gamma_private {
|
||||
drm_gamma_sarea_t *sarea_priv;
|
||||
drm_map_t *sarea;
|
||||
drm_map_t *buffers;
|
||||
drm_map_t *mmio0;
|
||||
drm_map_t *mmio1;
|
||||
drm_map_t *mmio2;
|
||||
|
@ -91,6 +90,10 @@ extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
|
|||
drm_buf_t *buf);
|
||||
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
|
||||
|
||||
/* externs for gamma changes to the ops */
|
||||
extern struct file_operations DRM(fops);
|
||||
extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
|
||||
extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
|
||||
|
||||
|
||||
#define GLINT_DRI_BUF_COUNT 256
|
||||
|
|
25 linux/i810.h
@ -84,41 +84,16 @@
|
|||
#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
|
||||
#define __HAVE_COUNTER9 _DRM_STAT_DMA
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define __HAVE_RELEASE 1
|
||||
#define DRIVER_RELEASE() do { \
|
||||
i810_reclaim_buffers( filp ); \
|
||||
} while (0)
|
||||
|
||||
#define DRIVER_PRETAKEDOWN( dev ) do { \
|
||||
i810_dma_cleanup( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
#define __HAVE_DMA 1
|
||||
#define __HAVE_DMA_QUEUE 1
|
||||
#define __HAVE_DMA_WAITLIST 0
|
||||
#define __HAVE_DMA_RECLAIM 1
|
||||
|
||||
#define __HAVE_DMA_QUIESCENT 1
|
||||
#define DRIVER_DMA_QUIESCENT() do { \
|
||||
i810_dma_quiescent( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* Don't need an irq any more. The template code will make sure that
|
||||
* a noop stub is generated for compatibility.
|
||||
*/
|
||||
/* XXX: Add vblank support? */
|
||||
#define __HAVE_IRQ 0
|
||||
|
||||
/* Buffer customization:
|
||||
*/
|
||||
|
||||
#define DRIVER_BUF_PRIV_T drm_i810_buf_priv_t
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_i810_private_t *)((dev)->dev_private))->buffer_map
|
||||
|
||||
#endif
|
||||
|
|
|
@ -371,15 +371,15 @@ static int i810_dma_initialize(drm_device_t *dev,
|
|||
DRM_ERROR("can not find sarea!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
|
||||
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
|
||||
if (!dev_priv->mmio_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i810_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find mmio map!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
|
||||
if (!dev_priv->buffer_map) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if (!dev->agp_buffer_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i810_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find dma buffer map!\n");
|
||||
|
@ -1394,3 +1394,28 @@ int i810_flip_bufs(struct inode *inode, struct file *filp,
|
|||
i810_dma_dispatch_flip( dev );
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void i810_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
i810_dma_cleanup( dev );
|
||||
}
|
||||
|
||||
static void i810_driver_release(drm_device_t *dev, struct file *filp)
|
||||
{
|
||||
i810_reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
static int i810_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
i810_dma_quiescent( dev );
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i810_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_i810_buf_priv_t);
|
||||
dev->fn_tbl.pretakedown = i810_driver_pretakedown;
|
||||
dev->fn_tbl.release = i810_driver_release;
|
||||
dev->fn_tbl.dma_quiescent = i810_driver_dma_quiescent;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,7 +53,6 @@ typedef struct _drm_i810_ring_buffer{
|
|||
|
||||
typedef struct drm_i810_private {
|
||||
drm_map_t *sarea_map;
|
||||
drm_map_t *buffer_map;
|
||||
drm_map_t *mmio_map;
|
||||
|
||||
drm_i810_sarea_t *sarea_priv;
|
||||
|
|
27 linux/i830.h
@ -83,30 +83,12 @@
|
|||
#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
|
||||
#define __HAVE_COUNTER9 _DRM_STAT_DMA
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define __HAVE_RELEASE 1
|
||||
#define DRIVER_RELEASE() do { \
|
||||
i830_reclaim_buffers( filp ); \
|
||||
} while (0)
|
||||
|
||||
#define DRIVER_PRETAKEDOWN( dev ) do { \
|
||||
i830_dma_cleanup( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
#define __HAVE_DMA 1
|
||||
#define __HAVE_DMA_QUEUE 1
|
||||
#define __HAVE_DMA_WAITLIST 0
|
||||
#define __HAVE_DMA_RECLAIM 1
|
||||
|
||||
#define __HAVE_DMA_QUIESCENT 1
|
||||
#define DRIVER_DMA_QUIESCENT() do { \
|
||||
i830_dma_quiescent( dev ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* Driver will work either way: IRQ's save cpu time when waiting for
|
||||
* the card, but are subject to subtle interactions between bios,
|
||||
* hardware and the driver.
|
||||
|
@ -121,13 +103,4 @@
|
|||
#define __HAVE_IRQ 0
|
||||
#endif
|
||||
|
||||
|
||||
/* Buffer customization:
|
||||
*/
|
||||
|
||||
#define DRIVER_BUF_PRIV_T drm_i830_buf_priv_t
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_i830_private_t *)((dev)->dev_private))->buffer_map
|
||||
|
||||
#endif
|
||||
|
|
|
@ -378,15 +378,15 @@ static int i830_dma_initialize(drm_device_t *dev,
|
|||
DRM_ERROR("can not find sarea!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
|
||||
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i830_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find mmio map!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
|
||||
if(!dev_priv->buffer_map) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i830_dma_cleanup(dev);
|
||||
DRM_ERROR("can not find dma buffer map!\n");
|
||||
|
@ -1589,3 +1589,29 @@ int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void i830_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
i830_dma_cleanup( dev );
|
||||
}
|
||||
|
||||
static void i830_driver_release(drm_device_t *dev, struct file *filp)
|
||||
{
|
||||
i830_reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
static int i830_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
i830_dma_quiescent( dev );
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i830_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_i830_buf_priv_t);
|
||||
dev->fn_tbl.pretakedown = i830_driver_pretakedown;
|
||||
dev->fn_tbl.release = i830_driver_release;
|
||||
dev->fn_tbl.dma_quiescent = i830_driver_dma_quiescent;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,7 +53,6 @@ typedef struct _drm_i830_ring_buffer{
|
|||
|
||||
typedef struct drm_i830_private {
|
||||
drm_map_t *sarea_map;
|
||||
drm_map_t *buffer_map;
|
||||
drm_map_t *mmio_map;
|
||||
|
||||
drm_i830_sarea_t *sarea_priv;
|
||||
|
|
|
@ -35,4 +35,6 @@
|
|||
#define SAVAGE_DEFAULT_USEC_TIMEOUT 10000
|
||||
#define SAVAGE_FREELIST_DEBUG 0
|
||||
|
||||
|
||||
void DRM(driver_register_fns)(drm_device_t *dev)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -46,3 +46,5 @@
|
|||
#include "drm_proc.h"
|
||||
#include "drm_vm.h"
|
||||
#include "drm_stub.h"
|
||||
|
||||
|
||||
|
|
|
@ -49,3 +49,8 @@
#include "drm_proc.h"
#include "drm_vm.h"
#include "drm_stub.h"

void DRM(driver_register_fns)(drm_device_t *dev)
{
}

@ -94,7 +94,7 @@ int i915_dma_cleanup(drm_device_t *dev)
(drm_i915_private_t *) dev->dev_private;

if (dev_priv->ring.virtual_start) {
DRM_IOREMAPFREE( &dev_priv->ring.map, dev);
drm_core_ioremapfree( &dev_priv->ring.map, dev);
}

if (dev_priv->hw_status_page) {
@ -135,8 +135,8 @@ static int i915_initialize(drm_device_t *dev,
i915_dma_cleanup(dev);
return DRM_ERR(EINVAL);
}

DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );

dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if(!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
@ -159,7 +159,7 @@ static int i915_initialize(drm_device_t *dev,
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;

DRM_IOREMAP( &dev_priv->ring.map, dev );
drm_core_ioremap( &dev_priv->ring.map, dev );

if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *) dev_priv;
@ -770,3 +770,26 @@ int i915_setparam( DRM_IOCTL_ARGS )

return 0;
}

static void i915_driver_pretakedown(drm_device_t *dev)
{
if ( dev->dev_private ) {
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_takedown( &(dev_priv->agp_heap) );
}
i915_dma_cleanup( dev );
}

static void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
if ( dev->dev_private ) {
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_release( dev, filp, dev_priv->agp_heap );
}
}

void i915_driver_register_fns(drm_device_t *dev)
{
dev->fn_tbl.pretakedown = i915_driver_pretakedown;
dev->fn_tbl.prerelease = i915_driver_prerelease;
}

@ -655,14 +655,14 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
mach64_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
if (!dev_priv->fb) {
DRM_ERROR("can not find frame buffer map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("can not find mmio map!\n");
dev->dev_private = (void *)dev_priv;
@ -675,14 +675,14 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
init->sarea_priv_offset);

if( !dev_priv->is_pci ) {
DRM_FIND_MAP( dev_priv->ring_map, init->ring_offset );
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
if ( !dev_priv->ring_map ) {
DRM_ERROR( "can not find ring map!\n" );
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
DRM_IOREMAP( dev_priv->ring_map, dev );
drm_core_ioremap( dev_priv->ring_map, dev );
if ( !dev_priv->ring_map->handle ) {
DRM_ERROR( "can not ioremap virtual address for"
" descriptor ring\n" );
@ -690,23 +690,26 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
mach64_do_cleanup_dma( dev );
return DRM_ERR(ENOMEM);
}
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
if ( !dev_priv->buffers ) {
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if ( !dev->agp_buffer_map ) {
DRM_ERROR( "can not find dma buffer map!\n" );
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma( dev );
return DRM_ERR(EINVAL);
}
DRM_IOREMAP( dev_priv->buffers, dev );
if ( !dev_priv->buffers->handle ) {
/* there might be a nicer way to do this -
dev isn't passed all the way though the mach64 - DA */
dev_priv->dev_buffers = dev->agp_buffer_map;

drm_core_ioremap( dev->agp_buffer_map, dev );
if ( !dev->agp_buffer_map->handle ) {
DRM_ERROR( "can not ioremap virtual address for"
" dma buffer\n" );
dev->dev_private = (void *) dev_priv;
mach64_do_cleanup_dma( dev );
return DRM_ERR(ENOMEM);
}
DRM_FIND_MAP( dev_priv->agp_textures,
init->agp_textures_offset );
dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
if (!dev_priv->agp_textures) {
DRM_ERROR( "can not find agp texture region!\n" );
dev->dev_private = (void *)dev_priv;
@ -987,11 +990,11 @@ int mach64_do_cleanup_dma( drm_device_t *dev )
}
} else {
if ( dev_priv->ring_map )
DRM_IOREMAPFREE( dev_priv->ring_map, dev );
drm_core_ioremapfree( dev_priv->ring_map, dev );
}

if ( dev_priv->buffers )
DRM_IOREMAPFREE( dev_priv->buffers, dev );
if ( dev->agp_buffer_map )
drm_core_ioremapfree( dev->agp_buffer_map, dev );

mach64_destroy_freelist( dev );

@ -1323,3 +1326,12 @@ int mach64_dma_buffers( DRM_IOCTL_ARGS )
return ret;
}

static void mach64_driver_pretakedown(drm_device_t *dev)
{
mach64_do_cleanup_dma( dev );
}

void mach64_driver_register_fns(drm_device_t *dev)
{
dev->fn_tbl.pretakedown = mach64_driver_pretakedown;
}

@ -92,7 +92,7 @@ typedef struct drm_mach64_private {
drm_local_map_t *fb;
drm_local_map_t *mmio;
drm_local_map_t *ring_map;
drm_local_map_t *buffers;
drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */
drm_local_map_t *agp_textures;
} drm_mach64_private_t;

@ -791,7 +791,7 @@ do { \
#define GETBUFPTR( __buf ) \
((dev_priv->is_pci) ? \
((u32 *)(__buf)->address) : \
((u32 *)((char *)dev_priv->buffers->handle + (__buf)->offset)))
((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))

#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)

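The new dev_buffers pointer and the GETBUFPTR change above exist because mach64's buffer macros only see dev_priv, while the DMA buffer map itself now lives in dev->agp_buffer_map. A small sketch of the intent, with GETBUFPTR written out as a function (assumed types, not lines from this commit):

/* Sketch only: dev_priv->dev_buffers is set once in mach64_do_dma_init()
 * to alias dev->agp_buffer_map, so macros that never receive dev can
 * still reach the ioremapped handle.
 */
static u32 *mach64_buf_ptr_sketch(drm_mach64_private_t *dev_priv, drm_buf_t *buf)
{
	if (dev_priv->is_pci)
		return (u32 *)buf->address;
	return (u32 *)((char *)dev_priv->dev_buffers->handle + buf->offset);
}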
@ -500,7 +500,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio) {
|
||||
DRM_ERROR( "failed to find mmio region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -508,7 +508,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->status, init->status_offset );
|
||||
dev_priv->status = drm_core_findmap(dev, init->status_offset);
|
||||
if(!dev_priv->status) {
|
||||
DRM_ERROR( "failed to find status page!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -516,8 +516,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->warp, init->warp_offset );
|
||||
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
|
||||
if(!dev_priv->warp) {
|
||||
DRM_ERROR( "failed to find warp microcode region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -525,7 +524,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->primary, init->primary_offset );
|
||||
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
|
||||
if(!dev_priv->primary) {
|
||||
DRM_ERROR( "failed to find primary dma region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -533,8 +532,8 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if(!dev_priv->buffers) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
DRM_ERROR( "failed to find dma buffer region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -546,13 +545,13 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
|
||||
init->sarea_priv_offset);
|
||||
|
||||
DRM_IOREMAP( dev_priv->warp, dev );
|
||||
DRM_IOREMAP( dev_priv->primary, dev );
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
drm_core_ioremap( dev_priv->warp, dev );
|
||||
drm_core_ioremap( dev_priv->primary, dev );
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
|
||||
if(!dev_priv->warp->handle ||
|
||||
!dev_priv->primary->handle ||
|
||||
!dev_priv->buffers->handle ) {
|
||||
!dev->agp_buffer_map->handle ) {
|
||||
DRM_ERROR( "failed to ioremap agp regions!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -643,11 +642,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
|
|||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ( dev_priv->warp != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->warp, dev );
|
||||
drm_core_ioremapfree( dev_priv->warp, dev );
|
||||
if ( dev_priv->primary != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->primary, dev );
|
||||
if ( dev_priv->buffers != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
drm_core_ioremapfree( dev_priv->primary, dev );
|
||||
if ( dev->agp_buffer_map != NULL )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
|
||||
if ( dev_priv->head != NULL ) {
|
||||
mga_freelist_cleanup( dev );
|
||||
|
@ -800,3 +799,20 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mga_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
mga_do_cleanup_dma( dev );
|
||||
}
|
||||
|
||||
static int mga_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
return mga_do_wait_for_idle( dev_priv );
|
||||
}
|
||||
|
||||
void mga_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->fn_tbl.pretakedown = mga_driver_pretakedown;
|
||||
dev->fn_tbl.dma_quiescent = mga_driver_dma_quiescent;
|
||||
}
|
||||
|
|
|
@ -467,29 +467,29 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio) {
|
||||
DRM_ERROR("could not find mmio region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->cce_ring, init->ring_offset );
|
||||
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if(!dev_priv->cce_ring) {
|
||||
DRM_ERROR("could not find cce ring region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if(!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if(!dev_priv->buffers) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
|
@ -497,8 +497,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
}
|
||||
|
||||
if ( !dev_priv->is_pci ) {
|
||||
DRM_FIND_MAP( dev_priv->agp_textures,
|
||||
init->agp_textures_offset );
|
||||
dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
|
||||
if(!dev_priv->agp_textures) {
|
||||
DRM_ERROR("could not find agp texture region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -513,12 +512,12 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if ( !dev_priv->is_pci ) {
|
||||
DRM_IOREMAP( dev_priv->cce_ring, dev );
|
||||
DRM_IOREMAP( dev_priv->ring_rptr, dev );
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
drm_core_ioremap( dev_priv->cce_ring, dev );
|
||||
drm_core_ioremap( dev_priv->ring_rptr, dev );
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
if(!dev_priv->cce_ring->handle ||
|
||||
!dev_priv->ring_rptr->handle ||
|
||||
!dev_priv->buffers->handle) {
|
||||
!dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("Could not ioremap agp regions!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
|
@ -531,7 +530,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
(void *)dev_priv->cce_ring->offset;
|
||||
dev_priv->ring_rptr->handle =
|
||||
(void *)dev_priv->ring_rptr->offset;
|
||||
dev_priv->buffers->handle = (void *)dev_priv->buffers->offset;
|
||||
dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;
|
||||
}
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
|
@ -601,11 +600,11 @@ int r128_do_cleanup_cce( drm_device_t *dev )
|
|||
#if __REALLY_HAVE_AGP
|
||||
if ( !dev_priv->is_pci ) {
|
||||
if ( dev_priv->cce_ring != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->cce_ring, dev );
|
||||
drm_core_ioremapfree( dev_priv->cce_ring, dev );
|
||||
if ( dev_priv->ring_rptr != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
|
||||
if ( dev_priv->buffers != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
drm_core_ioremapfree( dev_priv->ring_rptr, dev );
|
||||
if ( dev->agp_buffer_map != NULL )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
|
|
|
@ -100,7 +100,6 @@ typedef struct drm_r128_private {
|
|||
drm_local_map_t *mmio;
|
||||
drm_local_map_t *cce_ring;
|
||||
drm_local_map_t *ring_rptr;
|
||||
drm_local_map_t *buffers;
|
||||
drm_local_map_t *agp_textures;
|
||||
} drm_r128_private_t;
|
||||
|
||||
|
|
|
@ -667,7 +667,7 @@ static void r128_cce_dispatch_indirect( drm_device_t *dev,
|
|||
*/
|
||||
if ( dwords & 1 ) {
|
||||
u32 *data = (u32 *)
|
||||
((char *)dev_priv->buffers->handle
|
||||
((char *)dev->agp_buffer_map->handle
|
||||
+ buf->offset + start);
|
||||
data[dwords++] = cpu_to_le32( R128_CCE_PACKET2 );
|
||||
}
|
||||
|
@ -713,7 +713,7 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
|
|||
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
|
||||
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
|
||||
int format = sarea_priv->vc_format;
|
||||
int offset = dev_priv->buffers->offset - dev_priv->cce_buffers_offset;
|
||||
int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
|
||||
int prim = buf_priv->prim;
|
||||
u32 *data;
|
||||
int dwords;
|
||||
|
@ -733,7 +733,7 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
|
|||
|
||||
dwords = (end - start + 3) / sizeof(u32);
|
||||
|
||||
data = (u32 *)((char *)dev_priv->buffers->handle
|
||||
data = (u32 *)((char *)dev->agp_buffer_map->handle
|
||||
+ buf->offset + start);
|
||||
|
||||
data[0] = cpu_to_le32( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM,
|
||||
|
@ -857,7 +857,7 @@ static int r128_cce_dispatch_blit( DRMFILE filp,
|
|||
|
||||
dwords = (blit->width * blit->height) >> dword_shift;
|
||||
|
||||
data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
|
||||
data = (u32 *)((char *)dev->agp_buffer_map->handle + buf->offset);
|
||||
|
||||
data[0] = cpu_to_le32( CCE_PACKET3( R128_CNTL_HOSTDATA_BLT, dwords + 6 ) );
|
||||
data[1] = cpu_to_le32( (R128_GMC_DST_PITCH_OFFSET_CNTL |
|
||||
|
@ -1694,3 +1694,25 @@ int r128_getparam( DRM_IOCTL_ARGS )
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp)
|
||||
{
|
||||
if ( dev->dev_private ) {
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
if ( dev_priv->page_flipping ) {
|
||||
r128_do_cleanup_pageflip( dev );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void r128_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
r128_do_cleanup_cce( dev );
|
||||
}
|
||||
|
||||
void r128_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_r128_buf_priv_t);
|
||||
dev->fn_tbl.prerelease = r128_driver_prerelease;
|
||||
dev->fn_tbl.pretakedown = r128_driver_pretakedown;
|
||||
}
|
||||
|
|
|
@ -1108,33 +1108,33 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio) {
|
||||
DRM_ERROR("could not find mmio region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
|
||||
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if(!dev_priv->cp_ring) {
|
||||
DRM_ERROR("could not find cp ring region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if(!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if(!dev_priv->buffers) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
if ( init->gart_textures_offset ) {
|
||||
DRM_FIND_MAP( dev_priv->gart_textures, init->gart_textures_offset );
|
||||
dev_priv->gart_textures = drm_core_findmap(dev, init->gart_textures_offset);
|
||||
if ( !dev_priv->gart_textures ) {
|
||||
DRM_ERROR("could not find GART texture region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
|
@ -1147,13 +1147,13 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
init->sarea_priv_offset);
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
DRM_IOREMAP( dev_priv->cp_ring, dev );
|
||||
DRM_IOREMAP( dev_priv->ring_rptr, dev );
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
if ( dev_priv->flags & CHIP_IS_AGP ) {
|
||||
drm_core_ioremap( dev_priv->cp_ring, dev );
|
||||
drm_core_ioremap( dev_priv->ring_rptr, dev );
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
if(!dev_priv->cp_ring->handle ||
|
||||
!dev_priv->ring_rptr->handle ||
|
||||
!dev_priv->buffers->handle) {
|
||||
!dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("could not find ioremap agp regions!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@ -1165,14 +1165,14 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
(void *)dev_priv->cp_ring->offset;
|
||||
dev_priv->ring_rptr->handle =
|
||||
(void *)dev_priv->ring_rptr->offset;
|
||||
dev_priv->buffers->handle = (void *)dev_priv->buffers->offset;
|
||||
dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;
|
||||
|
||||
DRM_DEBUG( "dev_priv->cp_ring->handle %p\n",
|
||||
dev_priv->cp_ring->handle );
|
||||
DRM_DEBUG( "dev_priv->ring_rptr->handle %p\n",
|
||||
dev_priv->ring_rptr->handle );
|
||||
DRM_DEBUG( "dev_priv->buffers->handle %p\n",
|
||||
dev_priv->buffers->handle );
|
||||
DRM_DEBUG( "dev->agp_buffer_map->handle %p\n",
|
||||
dev->agp_buffer_map->handle );
|
||||
}
|
||||
|
||||
dev_priv->fb_location = ( RADEON_READ( RADEON_MC_FB_LOCATION )
|
||||
|
@ -1197,12 +1197,12 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if (dev_priv->flags & CHIP_IS_AGP)
|
||||
dev_priv->gart_buffers_offset = (dev_priv->buffers->offset
|
||||
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
|
||||
- dev->agp->base
|
||||
+ dev_priv->gart_vm_start);
|
||||
else
|
||||
#endif
|
||||
dev_priv->gart_buffers_offset = (dev_priv->buffers->offset
|
||||
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
|
||||
- dev->sg->handle
|
||||
+ dev_priv->gart_vm_start);
|
||||
|
||||
|
@ -1268,19 +1268,20 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
|
|||
#if __REALLY_HAVE_AGP
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
if ( dev_priv->cp_ring != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->cp_ring, dev );
|
||||
drm_core_ioremapfree( dev_priv->cp_ring, dev );
|
||||
if ( dev_priv->ring_rptr != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
|
||||
if ( dev_priv->buffers != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
drm_core_ioremapfree( dev_priv->ring_rptr, dev );
|
||||
if ( dev->agp_buffer_map != NULL )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if (!DRM(ati_pcigart_cleanup)( dev,
|
||||
dev_priv->phys_pci_gart,
|
||||
dev_priv->bus_pci_gart ))
|
||||
dev_priv->phys_pci_gart,
|
||||
dev_priv->bus_pci_gart ))
|
||||
DRM_ERROR( "failed to cleanup PCI GART!\n" );
|
||||
}
|
||||
|
||||
{
|
||||
int flags = dev_priv->flags;
|
||||
memset(dev_priv, 0, sizeof(*dev_priv));
|
||||
|
@ -1734,12 +1735,12 @@ static int radeon_register_regions(struct pci_dev *pdev) {
|
|||
/* request the mem regions */
|
||||
if (!request_mem_region (pci_resource_start( pdev, 2 ),
|
||||
pci_resource_len(pdev, 2), DRIVER_NAME)) {
|
||||
printk(KERN_ERR DRIVER_NAME ": cannot reserve MMIO region\n");
|
||||
DRM_ERROR("cannot reserve MMIO region\n");
|
||||
return retcode;
|
||||
}
|
||||
if (!request_mem_region (pci_resource_start( pdev, 0 ),
|
||||
pci_resource_len(pdev, 0), DRIVER_NAME)) {
|
||||
printk(KERN_ERR DRIVER_NAME ": cannot reserve FB region\n");
|
||||
DRM_ERROR("cannot reserve FB region\n");
|
||||
return retcode;
|
||||
}
|
||||
return 0;
|
||||
|
@ -1751,7 +1752,7 @@ static void radeon_release_regions(struct pci_dev *pdev) {
|
|||
}
|
||||
|
||||
/* Always create a map record for MMIO and FB memory, done from DRIVER_POSTINIT */
|
||||
int radeon_preinit( drm_device_t *dev, unsigned long flags )
|
||||
int radeon_preinit( struct drm_device *dev, unsigned long flags )
|
||||
{
|
||||
int retcode = -EINVAL;
|
||||
u32 save, temp;
|
||||
|
@ -1788,12 +1789,12 @@ int radeon_preinit( drm_device_t *dev, unsigned long flags )
|
|||
return 0;
|
||||
}
|
||||
|
||||
int radeon_postinit( drm_device_t *dev, unsigned long flags )
|
||||
int radeon_postinit( struct drm_device *dev, unsigned long flags )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_postcleanup( drm_device_t *dev )
|
||||
int radeon_postcleanup( struct drm_device *dev )
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
|
@ -1803,4 +1804,5 @@ void radeon_postcleanup( drm_device_t *dev )
|
|||
radeon_release_regions(dev->pdev);
|
||||
|
||||
dev->dev_private = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -83,7 +83,7 @@ struct mem_block {
|
|||
|
||||
typedef struct drm_radeon_private {
|
||||
|
||||
u32 flags; /* see radeon_chip_flags */
|
||||
uint32_t flags; /* see radeon_chip_flags */
|
||||
|
||||
drm_radeon_ring_buffer_t ring;
|
||||
drm_radeon_sarea_t *sarea_priv;
|
||||
|
@ -152,7 +152,6 @@ typedef struct drm_radeon_private {
|
|||
drm_local_map_t *mmio;
|
||||
drm_local_map_t *cp_ring;
|
||||
drm_local_map_t *ring_rptr;
|
||||
drm_local_map_t *buffers;
|
||||
drm_local_map_t *gart_textures;
|
||||
|
||||
struct mem_block *gart_heap;
|
||||
|
@ -738,9 +737,9 @@ do { \
|
|||
} while (0)
|
||||
|
||||
extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
|
||||
extern int radeon_preinit( drm_device_t *dev, unsigned long flags );
|
||||
extern int radeon_postinit( drm_device_t *dev, unsigned long flags );
|
||||
extern void radeon_postcleanup( drm_device_t *dev );
|
||||
extern int radeon_preinit( struct drm_device *dev, unsigned long flags );
|
||||
extern int radeon_postinit( struct drm_device *dev, unsigned long flags );
|
||||
extern int radeon_postcleanup( struct drm_device *dev );
|
||||
|
||||
#define CP_PACKET0( reg, n ) \
|
||||
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
|
||||
|
|
|
@ -1247,7 +1247,7 @@ static void radeon_cp_dispatch_indirect( drm_device_t *dev,
|
|||
*/
|
||||
if ( dwords & 1 ) {
|
||||
u32 *data = (u32 *)
|
||||
((char *)dev_priv->buffers->handle
|
||||
((char *)dev->agp_buffer_map->handle
|
||||
+ buf->offset + start);
|
||||
data[dwords++] = RADEON_CP_PACKET2;
|
||||
}
|
||||
|
@ -1301,7 +1301,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
|
|||
|
||||
dwords = (prim->finish - prim->start + 3) / sizeof(u32);
|
||||
|
||||
data = (u32 *)((char *)dev_priv->buffers->handle +
|
||||
data = (u32 *)((char *)dev->agp_buffer_map->handle +
|
||||
elt_buf->offset + prim->start);
|
||||
|
||||
data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
|
||||
|
@ -1445,7 +1445,7 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
|
|||
|
||||
/* Dispatch the indirect buffer.
|
||||
*/
|
||||
buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
|
||||
buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
|
||||
dwords = size / 4;
|
||||
buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
|
||||
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
|
||||
|
@ -2547,3 +2547,46 @@ int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* When a client dies:
|
||||
* - Check for and clean up flipped page state
|
||||
* - Free any alloced GART memory.
|
||||
*
|
||||
* DRM infrastructure takes care of reclaiming dma buffers.
|
||||
*/
|
||||
static void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp)
|
||||
{
|
||||
if ( dev->dev_private ) {
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
if ( dev_priv->page_flipping ) {
|
||||
radeon_do_cleanup_pageflip( dev );
|
||||
}
|
||||
radeon_mem_release( filp, dev_priv->gart_heap );
|
||||
radeon_mem_release( filp, dev_priv->fb_heap );
|
||||
}
|
||||
}
|
||||
|
||||
static void radeon_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
radeon_do_release(dev);
|
||||
}
|
||||
|
||||
static void radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
if ( dev_priv )
|
||||
filp_priv->radeon_fb_delta = dev_priv->fb_location;
|
||||
else
|
||||
filp_priv->radeon_fb_delta = 0;
|
||||
}
|
||||
|
||||
void radeon_driver_register_fns(struct drm_device *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
|
||||
dev->fn_tbl.preinit = radeon_preinit;
|
||||
dev->fn_tbl.postinit = radeon_postinit;
|
||||
dev->fn_tbl.postcleanup = radeon_postcleanup;
|
||||
dev->fn_tbl.prerelease = radeon_driver_prerelease;
|
||||
dev->fn_tbl.pretakedown = radeon_driver_pretakedown;
|
||||
dev->fn_tbl.open_helper = radeon_driver_open_helper;
|
||||
}
|
||||
|
|
|
@ -31,8 +31,6 @@
|
|||
#include "sis_ds.h"
|
||||
|
||||
typedef struct drm_sis_private {
|
||||
drm_map_t *buffers;
|
||||
|
||||
memHeap_t *AGPHeap;
|
||||
memHeap_t *FBHeap;
|
||||
} drm_sis_private_t;
|
||||
|
|
|
@ -330,7 +330,7 @@ int sis_ioctl_agp_free( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
int sis_init_context(drm_device_t *dev, int context)
|
||||
int sis_init_context(struct drm_device *dev, int context)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -362,7 +362,7 @@ int sis_init_context(drm_device_t *dev, int context)
|
|||
return 1;
|
||||
}
|
||||
|
||||
int sis_final_context(drm_device_t *dev, int context)
|
||||
int sis_final_context(struct drm_device *dev, int context)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -408,3 +408,9 @@ int sis_final_context(drm_device_t *dev, int context)
|
|||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void DRM(driver_register_fns)(drm_device_t *dev)
|
||||
{
|
||||
dev->fn_tbl.context_ctor = sis_init_context;
|
||||
dev->fn_tbl.context_dtor = sis_final_context;
|
||||
}
|
||||
|
|
|
@ -65,3 +65,4 @@
|
|||
#include "drm_proc.h"
|
||||
#include "drm_vm.h"
|
||||
#include "drm_stub.h"
|
||||
|
||||
|
|
|
@ -33,7 +33,6 @@ typedef struct drm_via_private {
|
|||
drm_map_t *fb;
|
||||
drm_map_t *mmio;
|
||||
unsigned long agpAddr;
|
||||
drm_map_t *buffers;
|
||||
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
|
||||
} drm_via_private_t;
|
||||
|
||||
|
|
|
@ -47,14 +47,14 @@ int via_do_init_map(drm_device_t *dev, drm_via_init_t *init)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_FIND_MAP(dev_priv->fb, init->fb_offset);
|
||||
dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
|
||||
if (!dev_priv->fb) {
|
||||
DRM_ERROR("could not find framebuffer!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
via_do_cleanup_map(dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_FIND_MAP(dev_priv->mmio, init->mmio_offset);
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if (!dev_priv->mmio) {
|
||||
DRM_ERROR("could not find mmio region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
|
|
@ -103,7 +103,7 @@ int via_fb_init( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
int via_init_context(drm_device_t *dev, int context)
|
||||
int via_init_context(struct drm_device *dev, int context)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -134,7 +134,7 @@ int via_init_context(drm_device_t *dev, int context)
|
|||
return 1;
|
||||
}
|
||||
|
||||
int via_final_context(drm_device_t *dev, int context)
|
||||
int via_final_context(struct drm_device *dev, int context)
|
||||
{
|
||||
int i;
|
||||
for (i=0; i<MAX_CONTEXT; i++)
|
||||
|
@ -345,3 +345,9 @@ int via_agp_free(drm_via_mem_t* mem)
|
|||
|
||||
EXPORT_SYMBOL(via_fb_alloc);
|
||||
EXPORT_SYMBOL(via_fb_free);
|
||||
|
||||
void DRM(driver_register_fns)(drm_device_t *dev)
|
||||
{
|
||||
dev->fn_tbl.context_ctor = via_init_context;
|
||||
dev->fn_tbl.context_dtor = via_final_context;
|
||||
}
|
||||
|
|
|
@ -57,31 +57,6 @@
|
|||
#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
|
||||
#define __HAVE_COUNTER9 _DRM_STAT_DMA
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define DRIVER_PRETAKEDOWN(dev) do { \
|
||||
if ( dev->dev_private ) { \
|
||||
drm_i915_private_t *dev_priv = dev->dev_private; \
|
||||
i915_mem_takedown( &(dev_priv->agp_heap) ); \
|
||||
} \
|
||||
i915_dma_cleanup( dev ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* When a client dies:
|
||||
* - Free any alloced agp memory.
|
||||
*/
|
||||
#define DRIVER_PRERELEASE() \
|
||||
do { \
|
||||
if ( dev->dev_private ) { \
|
||||
drm_i915_private_t *dev_priv = dev->dev_private; \
|
||||
i915_mem_release( dev, filp, dev_priv->agp_heap ); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
|
||||
|
||||
/* We use our own dma mechanisms, not the drm template code. However,
|
||||
* the shared IRQ code is useful to us:
|
||||
*/
|
||||
|
|
|
@ -94,7 +94,7 @@ int i915_dma_cleanup(drm_device_t *dev)
|
|||
(drm_i915_private_t *) dev->dev_private;
|
||||
|
||||
if (dev_priv->ring.virtual_start) {
|
||||
DRM_IOREMAPFREE( &dev_priv->ring.map, dev);
|
||||
drm_core_ioremapfree( &dev_priv->ring.map, dev);
|
||||
}
|
||||
|
||||
if (dev_priv->hw_status_page) {
|
||||
|
@ -135,8 +135,8 @@ static int i915_initialize(drm_device_t *dev,
|
|||
i915_dma_cleanup(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
|
||||
|
||||
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio_map) {
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
i915_dma_cleanup(dev);
|
||||
|
@ -159,7 +159,7 @@ static int i915_initialize(drm_device_t *dev,
|
|||
dev_priv->ring.map.flags = 0;
|
||||
dev_priv->ring.map.mtrr = 0;
|
||||
|
||||
DRM_IOREMAP( &dev_priv->ring.map, dev );
|
||||
drm_core_ioremap( &dev_priv->ring.map, dev );
|
||||
|
||||
if (dev_priv->ring.map.handle == NULL) {
|
||||
dev->dev_private = (void *) dev_priv;
|
||||
|
@ -770,3 +770,26 @@ int i915_setparam( DRM_IOCTL_ARGS )
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void i915_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
if ( dev->dev_private ) {
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
i915_mem_takedown( &(dev_priv->agp_heap) );
|
||||
}
|
||||
i915_dma_cleanup( dev );
|
||||
}
|
||||
|
||||
static void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp)
|
||||
{
|
||||
if ( dev->dev_private ) {
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
i915_mem_release( dev, filp, dev_priv->agp_heap );
|
||||
}
|
||||
}
|
||||
|
||||
void i915_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->fn_tbl.pretakedown = i915_driver_pretakedown;
|
||||
dev->fn_tbl.prerelease = i915_driver_prerelease;
|
||||
}
|
||||
|
|
|
@ -71,12 +71,6 @@
|
|||
[DRM_IOCTL_NR(DRM_IOCTL_MACH64_FLUSH)] = { mach64_dma_flush, 1, 0 }, \
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MACH64_GETPARAM)] = { mach64_get_param, 1, 0 }
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define DRIVER_PRETAKEDOWN(dev) do { \
|
||||
mach64_do_cleanup_dma( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
#define __HAVE_DMA 1
|
||||
|
@ -85,10 +79,4 @@
|
|||
#define __HAVE_SHARED_IRQ 1
|
||||
#define __HAVE_IRQ 1
|
||||
|
||||
/* Buffer customization:
|
||||
*/
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_mach64_private_t *)((dev)->dev_private))->buffers
|
||||
|
||||
#endif
|
||||
|
|
|
@ -655,14 +655,14 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
mach64_do_cleanup_dma(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
|
||||
dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
|
||||
if (!dev_priv->fb) {
|
||||
DRM_ERROR("can not find frame buffer map!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
mach64_do_cleanup_dma(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if (!dev_priv->mmio) {
|
||||
DRM_ERROR("can not find mmio map!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -675,14 +675,14 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
init->sarea_priv_offset);
|
||||
|
||||
if( !dev_priv->is_pci ) {
|
||||
DRM_FIND_MAP( dev_priv->ring_map, init->ring_offset );
|
||||
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
|
||||
if ( !dev_priv->ring_map ) {
|
||||
DRM_ERROR( "can not find ring map!\n" );
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
mach64_do_cleanup_dma(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_IOREMAP( dev_priv->ring_map, dev );
|
||||
drm_core_ioremap( dev_priv->ring_map, dev );
|
||||
if ( !dev_priv->ring_map->handle ) {
|
||||
DRM_ERROR( "can not ioremap virtual address for"
|
||||
" descriptor ring\n" );
|
||||
|
@ -690,23 +690,26 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
mach64_do_cleanup_dma( dev );
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if ( !dev_priv->buffers ) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if ( !dev->agp_buffer_map ) {
|
||||
DRM_ERROR( "can not find dma buffer map!\n" );
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
mach64_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
if ( !dev_priv->buffers->handle ) {
|
||||
/* there might be a nicer way to do this -
|
||||
dev isn't passed all the way though the mach64 - DA */
|
||||
dev_priv->dev_buffers = dev->agp_buffer_map;
|
||||
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
if ( !dev->agp_buffer_map->handle ) {
|
||||
DRM_ERROR( "can not ioremap virtual address for"
|
||||
" dma buffer\n" );
|
||||
dev->dev_private = (void *) dev_priv;
|
||||
mach64_do_cleanup_dma( dev );
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->agp_textures,
|
||||
init->agp_textures_offset );
|
||||
dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
|
||||
if (!dev_priv->agp_textures) {
|
||||
DRM_ERROR( "can not find agp texture region!\n" );
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -987,11 +990,11 @@ int mach64_do_cleanup_dma( drm_device_t *dev )
|
|||
}
|
||||
} else {
|
||||
if ( dev_priv->ring_map )
|
||||
DRM_IOREMAPFREE( dev_priv->ring_map, dev );
|
||||
drm_core_ioremapfree( dev_priv->ring_map, dev );
|
||||
}
|
||||
|
||||
if ( dev_priv->buffers )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
if ( dev->agp_buffer_map )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
|
||||
mach64_destroy_freelist( dev );
|
||||
|
||||
|
@ -1323,3 +1326,12 @@ int mach64_dma_buffers( DRM_IOCTL_ARGS )
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void mach64_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
mach64_do_cleanup_dma( dev );
|
||||
}
|
||||
|
||||
void mach64_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->fn_tbl.pretakedown = mach64_driver_pretakedown;
|
||||
}
|
||||
|
|
|
@ -92,7 +92,7 @@ typedef struct drm_mach64_private {
|
|||
drm_local_map_t *fb;
|
||||
drm_local_map_t *mmio;
|
||||
drm_local_map_t *ring_map;
|
||||
drm_local_map_t *buffers;
|
||||
drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */
|
||||
drm_local_map_t *agp_textures;
|
||||
} drm_mach64_private_t;
|
||||
|
||||
|
@ -791,7 +791,7 @@ do { \
|
|||
#define GETBUFPTR( __buf ) \
|
||||
((dev_priv->is_pci) ? \
|
||||
((u32 *)(__buf)->address) : \
|
||||
((u32 *)((char *)dev_priv->buffers->handle + (__buf)->offset)))
|
||||
((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))
|
||||
|
||||
#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)
|
||||
|
||||
|
|
shared/mga.h
|
@ -69,12 +69,6 @@
|
|||
#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
|
||||
#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define DRIVER_PRETAKEDOWN( dev ) do { \
|
||||
mga_do_cleanup_dma( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
#define __HAVE_DMA 1
|
||||
|
@ -82,17 +76,4 @@
|
|||
#define __HAVE_VBL_IRQ 1
|
||||
#define __HAVE_SHARED_IRQ 1
|
||||
|
||||
#define __HAVE_DMA_QUIESCENT 1
|
||||
#define DRIVER_DMA_QUIESCENT() do { \
|
||||
drm_mga_private_t *dev_priv = dev->dev_private; \
|
||||
return mga_do_wait_for_idle( dev_priv ); \
|
||||
} while (0)
|
||||
|
||||
/* Buffer customization:
|
||||
*/
|
||||
#define DRIVER_BUF_PRIV_T drm_mga_buf_priv_t
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_mga_private_t *)((dev)->dev_private))->buffers
|
||||
|
||||
#endif
|
||||
|
|
|
@ -500,7 +500,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio) {
|
||||
DRM_ERROR( "failed to find mmio region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -508,7 +508,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->status, init->status_offset );
|
||||
dev_priv->status = drm_core_findmap(dev, init->status_offset);
|
||||
if(!dev_priv->status) {
|
||||
DRM_ERROR( "failed to find status page!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -516,8 +516,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->warp, init->warp_offset );
|
||||
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
|
||||
if(!dev_priv->warp) {
|
||||
DRM_ERROR( "failed to find warp microcode region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -525,7 +524,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->primary, init->primary_offset );
|
||||
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
|
||||
if(!dev_priv->primary) {
|
||||
DRM_ERROR( "failed to find primary dma region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
|
@ -533,8 +532,8 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
mga_do_cleanup_dma( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if(!dev_priv->buffers) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
DRM_ERROR( "failed to find dma buffer region!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -546,13 +545,13 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
|
||||
init->sarea_priv_offset);
|
||||
|
||||
DRM_IOREMAP( dev_priv->warp, dev );
|
||||
DRM_IOREMAP( dev_priv->primary, dev );
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
drm_core_ioremap( dev_priv->warp, dev );
|
||||
drm_core_ioremap( dev_priv->primary, dev );
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
|
||||
if(!dev_priv->warp->handle ||
|
||||
!dev_priv->primary->handle ||
|
||||
!dev_priv->buffers->handle ) {
|
||||
!dev->agp_buffer_map->handle ) {
|
||||
DRM_ERROR( "failed to ioremap agp regions!\n" );
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -643,11 +642,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
|
|||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ( dev_priv->warp != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->warp, dev );
|
||||
drm_core_ioremapfree( dev_priv->warp, dev );
|
||||
if ( dev_priv->primary != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->primary, dev );
|
||||
if ( dev_priv->buffers != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
drm_core_ioremapfree( dev_priv->primary, dev );
|
||||
if ( dev->agp_buffer_map != NULL )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
|
||||
if ( dev_priv->head != NULL ) {
|
||||
mga_freelist_cleanup( dev );
|
||||
|
@ -800,3 +799,20 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mga_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
mga_do_cleanup_dma( dev );
|
||||
}
|
||||
|
||||
static int mga_driver_dma_quiescent(drm_device_t *dev)
|
||||
{
|
||||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
return mga_do_wait_for_idle( dev_priv );
|
||||
}
|
||||
|
||||
void mga_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->fn_tbl.pretakedown = mga_driver_pretakedown;
|
||||
dev->fn_tbl.dma_quiescent = mga_driver_dma_quiescent;
|
||||
}
|
||||
|
|
|
@ -79,20 +79,6 @@
|
|||
[DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, \
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 0 },
|
||||
|
||||
/* Driver customization:
|
||||
*/
|
||||
#define DRIVER_PRERELEASE() do { \
|
||||
if ( dev->dev_private ) { \
|
||||
drm_r128_private_t *dev_priv = dev->dev_private; \
|
||||
if ( dev_priv->page_flipping ) { \
|
||||
r128_do_cleanup_pageflip( dev ); \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define DRIVER_PRETAKEDOWN(dev) do { \
|
||||
r128_do_cleanup_cce( dev ); \
|
||||
} while (0)
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
|
@ -101,20 +87,4 @@
|
|||
#define __HAVE_VBL_IRQ 1
|
||||
#define __HAVE_SHARED_IRQ 1
|
||||
|
||||
#if 0
|
||||
/* GH: Remove this for now... */
|
||||
#define __HAVE_DMA_QUIESCENT 1
|
||||
#define DRIVER_DMA_QUIESCENT() do { \
|
||||
drm_r128_private_t *dev_priv = dev->dev_private; \
|
||||
return r128_do_cce_idle( dev_priv ); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/* Buffer customization:
|
||||
*/
|
||||
#define DRIVER_BUF_PRIV_T drm_r128_buf_priv_t
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_r128_private_t *)((dev)->dev_private))->buffers
|
||||
|
||||
#endif
|
||||
|
|
|
@ -467,29 +467,29 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio) {
|
||||
DRM_ERROR("could not find mmio region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->cce_ring, init->ring_offset );
|
||||
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if(!dev_priv->cce_ring) {
|
||||
DRM_ERROR("could not find cce ring region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if(!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if(!dev_priv->buffers) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
|
@ -497,8 +497,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
}
|
||||
|
||||
if ( !dev_priv->is_pci ) {
|
||||
DRM_FIND_MAP( dev_priv->agp_textures,
|
||||
init->agp_textures_offset );
|
||||
dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
|
||||
if(!dev_priv->agp_textures) {
|
||||
DRM_ERROR("could not find agp texture region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -513,12 +512,12 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if ( !dev_priv->is_pci ) {
|
||||
DRM_IOREMAP( dev_priv->cce_ring, dev );
|
||||
DRM_IOREMAP( dev_priv->ring_rptr, dev );
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
drm_core_ioremap( dev_priv->cce_ring, dev );
|
||||
drm_core_ioremap( dev_priv->ring_rptr, dev );
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
if(!dev_priv->cce_ring->handle ||
|
||||
!dev_priv->ring_rptr->handle ||
|
||||
!dev_priv->buffers->handle) {
|
||||
!dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("Could not ioremap agp regions!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
r128_do_cleanup_cce( dev );
|
||||
|
@ -531,7 +530,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
(void *)dev_priv->cce_ring->offset;
|
||||
dev_priv->ring_rptr->handle =
|
||||
(void *)dev_priv->ring_rptr->offset;
|
||||
dev_priv->buffers->handle = (void *)dev_priv->buffers->offset;
|
||||
dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;
|
||||
}
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
|
@ -601,11 +600,11 @@ int r128_do_cleanup_cce( drm_device_t *dev )
|
|||
#if __REALLY_HAVE_AGP
|
||||
if ( !dev_priv->is_pci ) {
|
||||
if ( dev_priv->cce_ring != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->cce_ring, dev );
|
||||
drm_core_ioremapfree( dev_priv->cce_ring, dev );
|
||||
if ( dev_priv->ring_rptr != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
|
||||
if ( dev_priv->buffers != NULL )
|
||||
DRM_IOREMAPFREE( dev_priv->buffers, dev );
|
||||
drm_core_ioremapfree( dev_priv->ring_rptr, dev );
|
||||
if ( dev->agp_buffer_map != NULL )
|
||||
drm_core_ioremapfree( dev->agp_buffer_map, dev );
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
|
|
|
@ -100,7 +100,6 @@ typedef struct drm_r128_private {
|
|||
drm_local_map_t *mmio;
|
||||
drm_local_map_t *cce_ring;
|
||||
drm_local_map_t *ring_rptr;
|
||||
drm_local_map_t *buffers;
|
||||
drm_local_map_t *agp_textures;
|
||||
} drm_r128_private_t;
|
||||
|
||||
|
|
|
@ -667,7 +667,7 @@ static void r128_cce_dispatch_indirect( drm_device_t *dev,
|
|||
*/
|
||||
if ( dwords & 1 ) {
|
||||
u32 *data = (u32 *)
|
||||
((char *)dev_priv->buffers->handle
|
||||
((char *)dev->agp_buffer_map->handle
|
||||
+ buf->offset + start);
|
||||
data[dwords++] = cpu_to_le32( R128_CCE_PACKET2 );
|
||||
}
|
||||
|
@ -713,7 +713,7 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
|
|||
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
|
||||
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
|
||||
int format = sarea_priv->vc_format;
|
||||
int offset = dev_priv->buffers->offset - dev_priv->cce_buffers_offset;
|
||||
int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
|
||||
int prim = buf_priv->prim;
|
||||
u32 *data;
|
||||
int dwords;
|
||||
|
@ -733,7 +733,7 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
|
|||
|
||||
dwords = (end - start + 3) / sizeof(u32);
|
||||
|
||||
data = (u32 *)((char *)dev_priv->buffers->handle
|
||||
data = (u32 *)((char *)dev->agp_buffer_map->handle
|
||||
+ buf->offset + start);
|
||||
|
||||
data[0] = cpu_to_le32( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM,
|
||||
|
@ -857,7 +857,7 @@ static int r128_cce_dispatch_blit( DRMFILE filp,
|
|||
|
||||
dwords = (blit->width * blit->height) >> dword_shift;
|
||||
|
||||
data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
|
||||
data = (u32 *)((char *)dev->agp_buffer_map->handle + buf->offset);
|
||||
|
||||
data[0] = cpu_to_le32( CCE_PACKET3( R128_CNTL_HOSTDATA_BLT, dwords + 6 ) );
|
||||
data[1] = cpu_to_le32( (R128_GMC_DST_PITCH_OFFSET_CNTL |
|
||||
|
@ -1694,3 +1694,25 @@ int r128_getparam( DRM_IOCTL_ARGS )
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp)
|
||||
{
|
||||
if ( dev->dev_private ) {
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
if ( dev_priv->page_flipping ) {
|
||||
r128_do_cleanup_pageflip( dev );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void r128_driver_pretakedown(drm_device_t *dev)
|
||||
{
|
||||
r128_do_cleanup_cce( dev );
|
||||
}
|
||||
|
||||
void r128_driver_register_fns(drm_device_t *dev)
|
||||
{
|
||||
dev->dev_priv_size = sizeof(drm_r128_buf_priv_t);
|
||||
dev->fn_tbl.prerelease = r128_driver_prerelease;
|
||||
dev->fn_tbl.pretakedown = r128_driver_pretakedown;
|
||||
}
|
||||
|
|
|
@ -118,47 +118,6 @@
|
|||
#define DRIVER_FILE_FIELDS \
|
||||
int64_t radeon_fb_delta; \
|
||||
|
||||
#define DRIVER_OPEN_HELPER( filp_priv, dev ) \
|
||||
do { \
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private; \
|
||||
if ( dev_priv ) \
|
||||
filp_priv->radeon_fb_delta = dev_priv->fb_location; \
|
||||
else \
|
||||
filp_priv->radeon_fb_delta = 0; \
|
||||
} while( 0 )
|
||||
|
||||
#define DRIVER_POSTINIT(dev, flags) radeon_postinit(dev, flags)
|
||||
#define DRIVER_PREINIT(dev, flags) radeon_preinit(dev, flags)
|
||||
#define DRIVER_POSTCLEANUP(dev) radeon_postcleanup(dev)
|
||||
|
||||
/* When a client dies:
|
||||
* - Check for and clean up flipped page state
|
||||
* - Free any alloced GART memory.
|
||||
*
|
||||
* DRM infrastructure takes care of reclaiming dma buffers.
|
||||
*/
|
||||
#define DRIVER_PRERELEASE() \
|
||||
do { \
|
||||
if ( dev->dev_private ) { \
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private; \
|
||||
if ( dev_priv->page_flipping ) { \
|
||||
radeon_do_cleanup_pageflip( dev ); \
|
||||
} \
|
||||
radeon_mem_release( filp, dev_priv->gart_heap ); \
|
||||
radeon_mem_release( filp, dev_priv->fb_heap ); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/* When the last client dies, shut down the CP and free dev->dev_priv.
|
||||
*/
|
||||
/* #define __HAVE_RELEASE 1 */
|
||||
#define DRIVER_PRETAKEDOWN( dev ) \
|
||||
do { \
|
||||
radeon_do_release( dev ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
|
||||
/* DMA customization:
|
||||
*/
|
||||
#define __HAVE_DMA 1
|
||||
|
@ -166,12 +125,4 @@ do { \
|
|||
#define __HAVE_VBL_IRQ 1
|
||||
#define __HAVE_SHARED_IRQ 1
|
||||
|
||||
|
||||
/* Buffer customization:
|
||||
*/
|
||||
#define DRIVER_BUF_PRIV_T drm_radeon_buf_priv_t
|
||||
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
|
||||
((drm_radeon_private_t *)((dev)->dev_private))->buffers
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1108,33 +1108,33 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
|
||||
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
|
||||
if(!dev_priv->mmio) {
|
||||
DRM_ERROR("could not find mmio region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
|
||||
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
|
||||
if(!dev_priv->cp_ring) {
|
||||
DRM_ERROR("could not find cp ring region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
|
||||
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
|
||||
if(!dev_priv->ring_rptr) {
|
||||
DRM_ERROR("could not find ring read pointer!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
|
||||
if(!dev_priv->buffers) {
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
if(!dev->agp_buffer_map) {
|
||||
DRM_ERROR("could not find dma buffer region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
if ( init->gart_textures_offset ) {
|
||||
DRM_FIND_MAP( dev_priv->gart_textures, init->gart_textures_offset );
|
||||
dev_priv->gart_textures = drm_core_findmap(dev, init->gart_textures_offset);
|
||||
if ( !dev_priv->gart_textures ) {
|
||||
DRM_ERROR("could not find GART texture region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
|
@ -1147,13 +1147,13 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
init->sarea_priv_offset);
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
DRM_IOREMAP( dev_priv->cp_ring, dev );
|
||||
DRM_IOREMAP( dev_priv->ring_rptr, dev );
|
||||
DRM_IOREMAP( dev_priv->buffers, dev );
|
||||
if ( dev_priv->flags & CHIP_IS_AGP ) {
|
||||
drm_core_ioremap( dev_priv->cp_ring, dev );
|
||||
drm_core_ioremap( dev_priv->ring_rptr, dev );
|
||||
drm_core_ioremap( dev->agp_buffer_map, dev );
|
||||
if(!dev_priv->cp_ring->handle ||
|
||||
!dev_priv->ring_rptr->handle ||
|
||||
!dev_priv->buffers->handle) {
|
||||
!dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("could not find ioremap agp regions!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@@ -1165,14 +1165,14 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
			(void *)dev_priv->cp_ring->offset;
		dev_priv->ring_rptr->handle =
			(void *)dev_priv->ring_rptr->offset;
		dev_priv->buffers->handle = (void *)dev_priv->buffers->offset;
		dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;

		DRM_DEBUG( "dev_priv->cp_ring->handle %p\n",
			   dev_priv->cp_ring->handle );
		DRM_DEBUG( "dev_priv->ring_rptr->handle %p\n",
			   dev_priv->ring_rptr->handle );
		DRM_DEBUG( "dev_priv->buffers->handle %p\n",
			   dev_priv->buffers->handle );
		DRM_DEBUG( "dev->agp_buffer_map->handle %p\n",
			   dev->agp_buffer_map->handle );
	}

	dev_priv->fb_location = ( RADEON_READ( RADEON_MC_FB_LOCATION )
@@ -1197,12 +1197,12 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )

#if __REALLY_HAVE_AGP
	if (dev_priv->flags & CHIP_IS_AGP)
		dev_priv->gart_buffers_offset = (dev_priv->buffers->offset
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->agp->base
						 + dev_priv->gart_vm_start);
	else
#endif
		dev_priv->gart_buffers_offset = (dev_priv->buffers->offset
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->sg->handle
						 + dev_priv->gart_vm_start);

@@ -1268,19 +1268,20 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
#if __REALLY_HAVE_AGP
	if (dev_priv->flags & CHIP_IS_AGP) {
		if ( dev_priv->cp_ring != NULL )
			DRM_IOREMAPFREE( dev_priv->cp_ring, dev );
			drm_core_ioremapfree( dev_priv->cp_ring, dev );
		if ( dev_priv->ring_rptr != NULL )
			DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
		if ( dev_priv->buffers != NULL )
			DRM_IOREMAPFREE( dev_priv->buffers, dev );
			drm_core_ioremapfree( dev_priv->ring_rptr, dev );
		if ( dev->agp_buffer_map != NULL )
			drm_core_ioremapfree( dev->agp_buffer_map, dev );
	} else
#endif
	{
		if (!DRM(ati_pcigart_cleanup)( dev,
					       dev_priv->phys_pci_gart,
					       dev_priv->bus_pci_gart ))
					       dev_priv->phys_pci_gart,
					       dev_priv->bus_pci_gart ))
			DRM_ERROR( "failed to cleanup PCI GART!\n" );
	}

	{
		int flags = dev_priv->flags;
		memset(dev_priv, 0, sizeof(*dev_priv));
@@ -1734,12 +1735,12 @@ static int radeon_register_regions(struct pci_dev *pdev) {
	/* request the mem regions */
	if (!request_mem_region (pci_resource_start( pdev, 2 ),
				 pci_resource_len(pdev, 2), DRIVER_NAME)) {
		printk(KERN_ERR DRIVER_NAME ": cannot reserve MMIO region\n");
		DRM_ERROR("cannot reserve MMIO region\n");
		return retcode;
	}
	if (!request_mem_region (pci_resource_start( pdev, 0 ),
				 pci_resource_len(pdev, 0), DRIVER_NAME)) {
		printk(KERN_ERR DRIVER_NAME ": cannot reserve FB region\n");
		DRM_ERROR("cannot reserve FB region\n");
		return retcode;
	}
	return 0;
@@ -1751,7 +1752,7 @@ static void radeon_release_regions(struct pci_dev *pdev) {
}

/* Always create a map record for MMIO and FB memory, done from DRIVER_POSTINIT */
int radeon_preinit( drm_device_t *dev, unsigned long flags )
int radeon_preinit( struct drm_device *dev, unsigned long flags )
{
	int retcode = -EINVAL;
	u32 save, temp;

@@ -1788,12 +1789,12 @@ int radeon_preinit( drm_device_t *dev, unsigned long flags )
	return 0;
}

int radeon_postinit( drm_device_t *dev, unsigned long flags )
int radeon_postinit( struct drm_device *dev, unsigned long flags )
{
	return 0;
}

void radeon_postcleanup( drm_device_t *dev )
int radeon_postcleanup( struct drm_device *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

@@ -1803,4 +1804,5 @@ void radeon_postcleanup( drm_device_t *dev )
	radeon_release_regions(dev->pdev);

	dev->dev_private = NULL;
	return 0;
}

@@ -83,7 +83,7 @@ struct mem_block {

typedef struct drm_radeon_private {

	u32 flags; /* see radeon_chip_flags */
	uint32_t flags; /* see radeon_chip_flags */

	drm_radeon_ring_buffer_t ring;
	drm_radeon_sarea_t *sarea_priv;

@@ -152,7 +152,6 @@ typedef struct drm_radeon_private {
	drm_local_map_t *mmio;
	drm_local_map_t *cp_ring;
	drm_local_map_t *ring_rptr;
	drm_local_map_t *buffers;
	drm_local_map_t *gart_textures;

	struct mem_block *gart_heap;
@@ -738,9 +737,9 @@ do { \
} while (0)

extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
extern int radeon_preinit( drm_device_t *dev, unsigned long flags );
extern int radeon_postinit( drm_device_t *dev, unsigned long flags );
extern void radeon_postcleanup( drm_device_t *dev );
extern int radeon_preinit( struct drm_device *dev, unsigned long flags );
extern int radeon_postinit( struct drm_device *dev, unsigned long flags );
extern int radeon_postcleanup( struct drm_device *dev );

#define CP_PACKET0( reg, n ) \
	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))

@@ -1247,7 +1247,7 @@ static void radeon_cp_dispatch_indirect( drm_device_t *dev,
	 */
	if ( dwords & 1 ) {
		u32 *data = (u32 *)
			((char *)dev_priv->buffers->handle
			((char *)dev->agp_buffer_map->handle
			 + buf->offset + start);
		data[dwords++] = RADEON_CP_PACKET2;
	}

@@ -1301,7 +1301,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	data = (u32 *)((char *)dev_priv->buffers->handle +
	data = (u32 *)((char *)dev->agp_buffer_map->handle +
		       elt_buf->offset + prim->start);

	data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );

@@ -1445,7 +1445,7 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,

	/* Dispatch the indirect buffer.
	 */
	buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
	buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
	dwords = size / 4;
	buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
	buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
@@ -2547,3 +2547,46 @@ int radeon_cp_setparam( DRM_IOCTL_ARGS ) {

	return 0;
}

/* When a client dies:
 *    - Check for and clean up flipped page state
 *    - Free any alloced GART memory.
 *
 * DRM infrastructure takes care of reclaiming dma buffers.
 */
static void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if ( dev->dev_private ) {
		drm_radeon_private_t *dev_priv = dev->dev_private;
		if ( dev_priv->page_flipping ) {
			radeon_do_cleanup_pageflip( dev );
		}
		radeon_mem_release( filp, dev_priv->gart_heap );
		radeon_mem_release( filp, dev_priv->fb_heap );
	}
}

static void radeon_driver_pretakedown(drm_device_t *dev)
{
	radeon_do_release(dev);
}

static void radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	if ( dev_priv )
		filp_priv->radeon_fb_delta = dev_priv->fb_location;
	else
		filp_priv->radeon_fb_delta = 0;
}

void radeon_driver_register_fns(struct drm_device *dev)
{
	dev->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
	dev->fn_tbl.preinit = radeon_preinit;
	dev->fn_tbl.postinit = radeon_postinit;
	dev->fn_tbl.postcleanup = radeon_postcleanup;
	dev->fn_tbl.prerelease = radeon_driver_prerelease;
	dev->fn_tbl.pretakedown = radeon_driver_pretakedown;
	dev->fn_tbl.open_helper = radeon_driver_open_helper;
}

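Drivers only fill the fn_tbl entries they need, so core code is expected to check each hook before calling it. The helpers below are purely illustrative (drm_takedown_example and drm_release_example are not part of this change); they only sketch that calling convention:

/* Hypothetical core-side callers: every fn_tbl hook is optional and is
 * tested before use, so unregistered hooks simply fall through to the
 * generic behaviour. */
static void drm_takedown_example(struct drm_device *dev)
{
	if (dev->fn_tbl.pretakedown)
		dev->fn_tbl.pretakedown(dev);
	/* ...generic device teardown continues here... */
}

static void drm_release_example(struct drm_device *dev, void *filp)
{
	if (dev->fn_tbl.prerelease)
		dev->fn_tbl.prerelease(dev, filp);
	/* ...generic per-file cleanup continues here... */
}
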
@@ -60,12 +60,4 @@

#define __HAVE_COUNTERS 5

/* Buffer customization:
 */
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
	((drm_sis_private_t *)((dev)->dev_private))->buffers

#define DRIVER_CTX_CTOR sis_init_context
#define DRIVER_CTX_DTOR sis_final_context

#endif

@@ -31,8 +31,6 @@
#include "sis_ds.h"

typedef struct drm_sis_private {
	drm_map_t *buffers;

	memHeap_t *AGPHeap;
	memHeap_t *FBHeap;
} drm_sis_private_t;

@@ -330,7 +330,7 @@ int sis_ioctl_agp_free( DRM_IOCTL_ARGS )
	return 0;
}

int sis_init_context(drm_device_t *dev, int context)
int sis_init_context(struct drm_device *dev, int context)
{
	int i;

@@ -362,7 +362,7 @@ int sis_init_context(drm_device_t *dev, int context)
	return 1;
}

int sis_final_context(drm_device_t *dev, int context)
int sis_final_context(struct drm_device *dev, int context)
{
	int i;

@@ -408,3 +408,9 @@ int sis_final_context(drm_device_t *dev, int context)

	return 1;
}

void DRM(driver_register_fns)(drm_device_t *dev)
{
	dev->fn_tbl.context_ctor = sis_init_context;
	dev->fn_tbl.context_dtor = sis_final_context;
}

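These two table entries take over from the DRIVER_CTX_CTOR / DRIVER_CTX_DTOR defines removed from the headers above, so the core context code can reach the per-driver hooks through the table instead of compile-time macros. A sketch of how such a caller might dispatch (drm_context_ctor_example is hypothetical; the return-1-on-success convention follows sis_init_context):

/* Illustrative only: how core context creation could invoke the driver's
 * constructor now that it lives in the function table. */
static int drm_context_ctor_example(struct drm_device *dev, int handle)
{
	if (dev->fn_tbl.context_ctor)
		return dev->fn_tbl.context_ctor(dev, handle);
	return 1;	/* no driver hook: nothing to do, treat as success */
}
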
@@ -41,12 +41,4 @@
#define __HAVE_SHARED_IRQ 1
#define __HAVE_VBL_IRQ 1


#define DRIVER_AGP_BUFFERS_MAP( dev ) \
	((drm_via_private_t *)((dev)->dev_private))->buffers

#define DRIVER_CTX_CTOR via_init_context
#define DRIVER_CTX_DTOR via_final_context

#endif

@@ -65,3 +65,4 @@
#include "drm_proc.h"
#include "drm_vm.h"
#include "drm_stub.h"

@@ -33,7 +33,6 @@ typedef struct drm_via_private {
	drm_map_t *fb;
	drm_map_t *mmio;
	unsigned long agpAddr;
	drm_map_t *buffers;
	wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
} drm_via_private_t;

@@ -47,14 +47,14 @@ int via_do_init_map(drm_device_t *dev, drm_via_init_t *init)
		return -EINVAL;
	}

	DRM_FIND_MAP(dev_priv->fb, init->fb_offset);
	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
	if (!dev_priv->fb) {
		DRM_ERROR("could not find framebuffer!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}
	DRM_FIND_MAP(dev_priv->mmio, init->mmio_offset);
	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio) {
		DRM_ERROR("could not find mmio region!\n");
		dev->dev_private = (void *)dev_priv;

@@ -103,7 +103,7 @@ int via_fb_init( DRM_IOCTL_ARGS )
	return 0;
}

int via_init_context(drm_device_t *dev, int context)
int via_init_context(struct drm_device *dev, int context)
{
	int i;

@@ -134,7 +134,7 @@ int via_init_context(drm_device_t *dev, int context)
	return 1;
}

int via_final_context(drm_device_t *dev, int context)
int via_final_context(struct drm_device *dev, int context)
{
	int i;
	for (i=0; i<MAX_CONTEXT; i++)

@@ -345,3 +345,9 @@ int via_agp_free(drm_via_mem_t* mem)

EXPORT_SYMBOL(via_fb_alloc);
EXPORT_SYMBOL(via_fb_free);

void DRM(driver_register_fns)(drm_device_t *dev)
{
	dev->fn_tbl.context_ctor = via_init_context;
	dev->fn_tbl.context_dtor = via_final_context;
}