Merge git://proxy01.pd.intel.com:9419/git/mesa/drm into crestline

main
Nian Wu 2007-03-19 17:00:31 +08:00
commit 8398b99d8d
17 changed files with 165 additions and 207 deletions

View File

@ -416,18 +416,18 @@ typedef struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
/*
* The user object hash table is global and resides in the
* drm_device structure. We protect the lists and hash tables with the
* device struct_mutex. A bit coarse-grained but probably the best
* option.
*/
struct list_head refd_objects;
struct list_head user_objects;
drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
void *driver_priv;
} drm_file_t;
@ -534,7 +534,7 @@ typedef struct drm_sigdata {
} drm_sigdata_t;
/*
* Generic memory manager structs
*/
@ -544,7 +544,7 @@ typedef struct drm_mm_node {
int free;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
} drm_mm_node_t;
@ -562,7 +562,7 @@ typedef struct drm_map_list {
drm_hash_item_t hash;
drm_map_t *map; /**< mapping */
drm_u64_t user_token;
drm_mm_node_t *file_offset_node;
} drm_map_list_t;
typedef drm_map_t drm_local_map_t;
@ -653,9 +653,9 @@ struct drm_driver {
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
int major;
int minor;
int patchlevel;
@ -732,11 +732,11 @@ typedef struct drm_device {
/*@{ */
drm_map_list_t *maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
drm_open_hash_t object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
/** \name Context handle management */
/*@{ */
@ -818,7 +818,7 @@ typedef struct drm_device {
drm_fence_manager_t fm;
drm_buffer_manager_t bm;
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
@ -948,7 +948,7 @@ extern void drm_free_memctl(size_t size);
extern int drm_alloc_memctl(size_t size);
extern void drm_query_memctl(drm_u64_t *cur_used,
drm_u64_t *low_threshold,
drm_u64_t *high_threshold);
extern void drm_init_memctl(size_t low_threshold,
size_t high_threshold,
size_t unit_size);
@ -1161,14 +1161,14 @@ extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
drm_head_t * head);
extern void drm_sysfs_device_remove(struct class_device *class_dev);
/*
* Basic memory manager support (drm_mm.c)
*/
extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
unsigned alignment);
extern void drm_mm_put_block(drm_mm_node_t *cur);
extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(drm_mm_t *mm);
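For readers skimming this hunk, the declarations above are the whole public face of the core range allocator. The usage sketch below is not part of the commit; it only strings the prototypes listed here together, and the helper name, page counts, and zero alignment are illustrative assumptions.

/* Hypothetical caller of the drm_mm interface declared above. */
static drm_mm_node_t *example_alloc(drm_mm_t *mm, unsigned long num_pages)
{
	drm_mm_node_t *hole;

	/* Find a free hole of at least num_pages (best fit, no alignment). */
	hole = drm_mm_search_free(mm, num_pages, 0, 1);
	if (!hole)
		return NULL;

	/* Carve exactly num_pages out of that hole and return the new node. */
	return drm_mm_get_block(hole, num_pages, 0);
}

/* Lifetime of the manager itself: drm_mm_init(mm, start, size) up front,
 * drm_mm_put_block(node) to return space, drm_mm_takedown(mm) at the end. */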
@ -1181,11 +1181,6 @@ static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
{
return block->mm;
}
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
@ -1204,7 +1199,7 @@ static __inline__ int drm_device_is_agp(drm_device_t *dev)
{
if ( dev->driver->device_is_agp != NULL ) {
int err = (*dev->driver->device_is_agp)( dev );
if (err != 2) {
return err;
}
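The drm_device_is_agp() fragment above treats a return value of 2 from the driver's device_is_agp hook as "no opinion" (fall through to the core's own probing) and any other value as the final answer. A hedged sketch of such a hook follows; is_pcie_variant() is a hypothetical helper, not something this commit adds.

/* Sketch of a driver-side device_is_agp hook (hypothetical helper used). */
static int example_device_is_agp(drm_device_t *dev)
{
	if (is_pcie_variant(dev))	/* hypothetical check for a PCIE flavour */
		return 0;		/* definitely not AGP */
	return 2;			/* no opinion: let the DRM core decide */
}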

View File

@ -567,7 +567,7 @@ static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
}
static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
struct page **pages) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
@ -597,9 +597,9 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
return 0;
}
static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
unsigned long offset,
int cached)
{
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
@ -620,7 +620,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
@ -646,8 +646,8 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
agp_priv = (drm_agp_ttm_priv *) backend->private;
@ -659,11 +659,11 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
backend->private = NULL;
}
if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
}
}
}
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend)
@ -695,15 +695,15 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
if (!agp_be)
return NULL;
agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
if (!agp_priv) {
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
return NULL;
}
agp_priv->mem = NULL;
agp_priv->alloc_type = AGP_USER_MEMORY;
agp_priv->cached_type = AGP_USER_CACHED_MEMORY;

View File

@ -1,8 +1,8 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@ -14,15 +14,15 @@
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@ -35,7 +35,7 @@
*
* The buffer usage atomic_t needs to be protected by dev->struct_mutex
* when there is a chance that it can be zero before or after the operation.
*
* dev->struct_mutex also protects all lists and list heads. Hash tables and hash
* heads.
*
@ -502,7 +502,7 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
}
/*
* Note. The caller has to register (if applicable)
* and deregister fence object usage.
*/
@ -546,7 +546,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
/*
* Transfer to a local list before we release the dev->struct_mutex;
* This is so we don't get any new unfenced objects while fencing
* the ones we already have..
*/
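The comment just above relies on the usual "splice to a private list under the lock, then work on it unlocked" idiom. The sketch below restates that idiom with illustrative names; it is not code from this diff.

/* Move everything off a shared list while holding the lock, so later
 * arrivals cannot be confused with the items we are about to process. */
static void example_drain(struct mutex *lock, struct list_head *shared)
{
	struct list_head local;

	INIT_LIST_HEAD(&local);
	mutex_lock(lock);
	list_splice_init(shared, &local);	/* 'shared' is left empty */
	mutex_unlock(lock);

	/* 'local' is now private to this thread and safe to walk unlocked. */
}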
@ -604,7 +604,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
EXPORT_SYMBOL(drm_fence_buffer_objects);
/*
* bo->mutex locked
*/
static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
@ -1011,7 +1011,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
* 1) validating
* 2) submitting commands
* 3) fencing
* Should really be an atomic operation.
* We now "solve" this problem by keeping
* the buffer "unfenced" after validating, but before fencing.
*/
@ -1053,7 +1053,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
/*
* Fill in the ioctl reply argument with buffer info.
* Bo locked.
*/
static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
@ -1078,8 +1078,8 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
/*
* Wait for buffer idle and register that we've mapped the buffer.
* Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
* so that if the client dies, the mapping is automatically
* unregistered.
*/
@ -1215,7 +1215,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
}
/*
* bo->mutex locked.
* Note that new_mem_flags are NOT transferred to the bo->mem.mask.
*/
@ -2251,7 +2251,7 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci);
* \param bus_size On return the byte size of the buffer object or zero if
* the buffer object memory is not accessible through a PCI region.
* \return Failure indication.
*
* Returns -EINVAL if the buffer object is currently not mappable.
* Otherwise returns zero.
*/

View File

@ -442,7 +442,7 @@ void drm_exit(struct drm_driver *driver)
EXPORT_SYMBOL(drm_exit);
/** File operations structure */
-static struct file_operations drm_stub_fops = {
+static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
.open = drm_stub_open
};
@ -591,21 +591,20 @@ int drm_ioctl(struct inode *inode, struct file *filp,
current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
priv->authenticated);
-if (nr >= DRIVER_IOCTL_COUNT &&
-(nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
+if ((nr >= DRIVER_IOCTL_COUNT) &&
+((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
-else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)
+else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
ioctl = &drm_ioctls[nr];
else
goto err_i1;
func = ioctl->func;
-if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) /* Local override? */
+/* is there a local override? */
+if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
func = dev->driver->dma_ioctl;
if (!func) {

View File

@ -159,7 +159,7 @@ int drm_open(struct inode *inode, struct file *filp)
spin_unlock(&dev->count_lock);
}
mutex_lock(&dev->struct_mutex);
BUG_ON((dev->dev_mapping != NULL) &&
(dev->dev_mapping != inode->i_mapping));
if (dev->dev_mapping == NULL)
dev->dev_mapping = inode->i_mapping;
@ -355,42 +355,34 @@ static void drm_object_release(struct file *filp) {
/*
* Free leftover ref objects created by me. Note that we cannot use
* list_for_each() here, as the struct_mutex may be temporarily released
* by the remove_() functions, and thus the lists may be altered.
* Also, a drm_remove_ref_object() will not remove it
* from the list unless its refcount is 1.
*/
head = &priv->refd_objects;
while (head->next != head) {
ref_object = list_entry(head->next, drm_ref_object_t, list);
drm_remove_ref_object(priv, ref_object);
head = &priv->refd_objects;
}
/*
* Free leftover user objects created by me.
*/
head = &priv->user_objects;
while (head->next != head) {
user_object = list_entry(head->next, drm_user_object_t, list);
drm_remove_user_object(priv, user_object);
head = &priv->user_objects;
}
for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
drm_ht_remove(&priv->refd_object_hash[i]);
}
}
}
/**
* Release file.
@ -563,7 +555,7 @@ EXPORT_SYMBOL(drm_release);
* to set a newer interface version to avoid breaking older Xservers.
* Without fixing the Xserver you get: "WaitForSomething(): select: errno=22"
* http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try
* to return the correct response.
*/
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{

View File

@ -138,12 +138,12 @@ static int drm_set_busid(drm_device_t * dev)
{
int len;
if (dev->unique != NULL)
-return EBUSY;
+return -EBUSY;
dev->unique_len = 40;
dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
if (dev->unique == NULL)
-return ENOMEM;
+return -ENOMEM;
len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
@ -156,7 +156,7 @@ static int drm_set_busid(drm_device_t * dev)
dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2,
DRM_MEM_DRIVER);
if (dev->devname == NULL)
-return ENOMEM;
+return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);
@ -343,7 +343,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
-return EINVAL;
+return -EINVAL;
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
dev->if_version = max(if_version, dev->if_version);
if (sv.drm_di_minor >= 1) {
@ -357,7 +357,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
if (sv.drm_dd_major != -1) {
if (sv.drm_dd_major != dev->driver->major ||
sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor)
-return EINVAL;
+return -EINVAL;
if (dev->driver->set_version)
dev->driver->set_version(dev, &sv);
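Every hunk in this file makes the same correction: kernel ioctl paths report failure as a negative errno, because a positive EBUSY/ENOMEM/EINVAL is indistinguishable from a successful non-zero return to callers that test for ret < 0. A minimal illustration, using only names visible in the first hunk above (the function name is a placeholder):

static int example_busid_check(drm_device_t *dev)
{
	if (dev->unique != NULL)
		return -EBUSY;	/* negative errno: callers check for ret < 0 */
	return 0;		/* success */
}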

View File

@ -213,6 +213,49 @@ void drm_free_pages(unsigned long address, int order, int area)
}
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (!agpmem)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(num_pages * sizeof(struct page *));
if (!page_map)
return NULL;
phys_addr_map =
agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
for (i = 0; i < num_pages; ++i)
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
/** Wrapper around agp_allocate_memory() */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
@ -243,7 +286,15 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
return NULL;
}
#endif /* agp */
#endif /* debug_memory */
void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
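agp_remap() is added here (and its old copy removed from drm_vm.c further down) so that the mapping helpers in this file can use it on chipsets where CPU accesses are not translated by the GART. The sketch below only illustrates that decision; the function name is a placeholder, and the cant_use_aperture flag is an assumption about the surrounding DRM code, not a line from this diff.

/* Hypothetical illustration of choosing agp_remap() over a plain ioremap(). */
static void example_map(struct drm_map *map, struct drm_device *dev)
{
	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);	/* CPU bypasses the GART */
	else
		map->handle = ioremap(map->offset, map->size);		/* ordinary MMIO/framebuffer map */
}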

View File

@ -57,83 +57,6 @@
# endif
#endif
/*
* Find the drm_map that covers the range [offset, offset+size).
*/
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
struct list_head *list;
drm_map_list_t *r_list;
drm_map_t *map;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
map = r_list->map;
if (!map)
continue;
if (map->offset <= offset
&& (offset + size) <= (map->offset + map->size))
return map;
}
return NULL;
}
static inline void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (!agpmem)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(num_pages * sizeof(struct page *));
if (!page_map)
return NULL;
phys_addr_map =
agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
for (i = 0; i < num_pages; ++i)
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
#else /* __OS_HAS_AGP */
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
return NULL;
}
static inline void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
return NULL;
}
#endif

View File

@ -53,7 +53,7 @@ unsigned long drm_mm_tail_space(drm_mm_t *mm)
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
@ -66,7 +66,7 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free)
return -ENOMEM;
if (entry->size <= size)
return -ENOMEM;
@ -80,7 +80,7 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
unsigned long size)
{
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
@ -96,7 +96,7 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
return 0;
}
int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
{
@ -111,12 +111,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
entry->size += size;
return 0;
}
static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
unsigned long size)
{
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
@ -136,8 +136,6 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
parent->start += size;
return child;
}
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
unsigned long size, unsigned alignment)
@ -149,13 +147,13 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
if (alignment)
tmp = parent->start % alignment;
if (tmp) {
align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
if (!align_splitoff)
return NULL;
}
if (parent->size == size) {
list_del_init(&parent->fl_entry);
parent->free = 0;
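The alignment handling in drm_mm_get_block() above splits off just enough of the hole to push the returned block onto an aligned boundary. A small worked example with hypothetical numbers:

/*
 * parent->start = 0x3000, requested alignment = 0x4000
 * tmp            = 0x3000 % 0x4000 = 0x3000
 * split-off size = alignment - tmp = 0x1000
 * drm_mm_split_at_start(parent, 0x1000) turns that 0x1000 prefix into its
 * own node (align_splitoff) and advances parent->start to 0x4000, which
 * now satisfies the requested alignment.
 */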

View File

@ -1,8 +1,8 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@ -14,15 +14,15 @@
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>

View File

@ -1,8 +1,8 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@ -10,7 +10,7 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
@ -19,8 +19,8 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
@ -222,12 +222,12 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
* The ttm backend GTT interface. (In our case AGP).
* Any similar type of device (PCIE?)
* needs only to implement these functions to be usable with the "TTM" interface.
* The AGP backend implementation lives in drm_agpsupport.c
* basically maps these calls to available functions in agpgart.
* Each drm device driver gets an
* additional function pointer that creates these types,
* so that the device can choose the correct aperture.
* (Multiple AGP apertures, etc.)
* Most device drivers will let this point to the standard AGP implementation.
*/
@ -275,7 +275,7 @@ extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
* which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
* when the last vma exits.
*/
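The comment above spells out the TTM backend contract that the drm_agpsupport.c hunks earlier in this diff implement for AGP. As a reading aid, here is a skeleton for a hypothetical non-AGP backend using only the callback signatures visible in those hunks; the names and empty bodies are placeholders, not code from this commit.

/* Hypothetical backend skeleton mirroring the AGP callbacks shown above. */
static int example_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
			    struct page **pages)
{
	return 0;	/* remember the page array so bind can insert it */
}

static int example_bind_ttm(drm_ttm_backend_t *backend, unsigned long offset,
			    int cached)
{
	return 0;	/* program the device aperture at 'offset', cached or not */
}

static int example_unbind_ttm(drm_ttm_backend_t *backend)
{
	return 0;	/* take the pages out of the aperture again */
}

static void example_destroy_ttm(drm_ttm_backend_t *backend)
{
	/* free private data; honour DRM_BE_FLAG_NEEDS_FREE as the AGP code does */
}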

View File

@ -75,7 +75,7 @@ static struct drm_proc_list {
#endif
};
-#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
/**
* Initialize the DRI proc filesystem for a device.

View File

@ -1,8 +1,8 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@ -10,19 +10,19 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@ -101,7 +101,7 @@ static struct page *drm_ttm_alloc_page(void)
}
/*
* Change caching policy for the linear kernel map
* for range of pages in a ttm.
*/

View File

@ -131,7 +131,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-static struct file_operations i810_buffer_fops = {
+static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,

View File

@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-static struct file_operations i830_buffer_fops = {
+static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,

View File

@ -798,7 +798,7 @@ typedef enum {
drm_bo_type_user,
drm_bo_type_fake
}drm_bo_type_t;
typedef struct drm_bo_arg_request {
unsigned handle; /* User space handle */
@ -846,10 +846,10 @@ typedef struct drm_bo_arg_reply {
unsigned page_alignment;
unsigned expand_pad[4]; /*Future expansion */
}drm_bo_arg_reply_t;
typedef struct drm_bo_arg{
int handled;
drm_u64_t next;
union {
drm_bo_arg_request_t req;

View File

@ -112,14 +112,14 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
spinlock_t user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
uint32_t irq_enable_reg;
int irq_enabled;
#ifdef I915_HAVE_FENCE
uint32_t flush_sequence;
uint32_t flush_flags;
uint32_t flush_pending;
uint32_t saved_flush_status;