Revert back to drm_order() instead of using kernel get_order(). The
functions are not identical.

parent 4dbc1e8728
commit 61d36f6179
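For context on why the helpers differ: the kernel's get_order() returns a page-allocation order (log2 of the number of PAGE_SIZE pages needed for an allocation), while drm_order() returns the exponent of the smallest power of two greater than or equal to the size in bytes. The comparison below is an illustrative userspace sketch, not part of this commit; my_get_order(), my_drm_order() and the PAGE_SHIFT value of 12 are assumptions made for the example.

/*
 * Minimal userspace sketch (not kernel code) contrasting the two helpers.
 * my_get_order() mirrors the kernel's get_order(): log2 of the number of
 * PAGE_SIZE pages needed for an allocation.  my_drm_order() mirrors
 * drm_order(): log2 of the byte size itself, rounded up.
 * PAGE_SHIFT is assumed to be 12 (4 KiB pages) for this example.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static int my_get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;	/* pages needed, minus one */
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

static int my_drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;
	if (size & (size - 1))
		++order;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 8192, 65536 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %lu: get_order=%d drm_order=%d\n",
		       sizes[i], my_get_order(sizes[i]), my_drm_order(sizes[i]));
	/* e.g. size 4096: get_order=0 drm_order=12 */
	return 0;
}

Built with a C compiler and run, the sketch prints get_order values of 0, 1 and 4 against drm_order values of 12, 13 and 16 for the same sizes, which is the mismatch the commit message refers to.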
@@ -868,6 +868,7 @@ extern int drm_freebufs(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
 extern int drm_mapbufs(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
+extern int drm_order(unsigned long size);
 
 				/* DMA support (drm_dma.h) */
 extern int drm_dma_setup(drm_device_t * dev);
@@ -212,7 +212,7 @@ int drm_addmap(struct inode *inode, struct file *filp,
 	case _DRM_SHM:
 		map->handle = vmalloc_32(map->size);
 		DRM_DEBUG("%lu %d %p\n",
-			  map->size, get_order(map->size), map->handle);
+			  map->size, drm_order(map->size), map->handle);
 		if (!map->handle) {
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -ENOMEM;
@@ -440,7 +440,7 @@ int drm_addbufs_agp(struct inode *inode, struct file *filp,
 		return -EFAULT;
 
 	count = request.count;
-	order = get_order(request.size);
+	order = drm_order(request.size);
 	size = 1 << order;
 
 	alignment = (request.flags & _DRM_PAGE_ALIGN)
@@ -609,7 +609,7 @@ int drm_addbufs_pci(struct inode *inode, struct file *filp,
 		return -EFAULT;
 
 	count = request.count;
-	order = get_order(request.size);
+	order = drm_order(request.size);
 	size = 1 << order;
 
 	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
@@ -835,7 +835,7 @@ int drm_addbufs_sg(struct inode *inode, struct file *filp,
 		return -EFAULT;
 
 	count = request.count;
-	order = get_order(request.size);
+	order = drm_order(request.size);
 	size = 1 << order;
 
 	alignment = (request.flags & _DRM_PAGE_ALIGN)
@@ -1137,7 +1137,7 @@ int drm_markbufs(struct inode *inode, struct file *filp,
 
 	DRM_DEBUG("%d, %d, %d\n",
 		  request.size, request.low_mark, request.high_mark);
-	order = get_order(request.size);
+	order = drm_order(request.size);
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 	entry = &dma->bufs[order];
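A note on the hunk above (an observation, not part of the commit text): dma->bufs[] is indexed by the byte-size order that drm_order() returns, and the DRM_MIN_ORDER/DRM_MAX_ORDER bounds are written for that scale. A page-based get_order() value would either fail the range check or land in the wrong bucket, so the revert matters here for correctness, not just for debug output.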
@@ -1332,3 +1332,28 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
 
 	return retcode;
 }
+
+/**
+ * Compute size order. Returns the exponent of the smaller power of two which
+ * is greater or equal to given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order( unsigned long size )
+{
+	int order;
+	unsigned long tmp;
+
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
+		;
+
+	if (size & (size - 1))
+		++order;
+
+	return order;
+}
+EXPORT_SYMBOL(drm_order);
+
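As a worked example of the reinstated helper (illustrative, not part of the diff): drm_order(4096) shifts tmp through 2048, 1024, ..., 1, giving order = 12, and since 4096 & 4095 == 0 the result stays 12; drm_order(4097) also reaches 12 in the loop, but 4097 is not a power of two, so the final increment yields 13. The kernel's get_order() would return 0 and 1 respectively on 4 KiB pages, which is why the two cannot simply be swapped.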
@@ -541,7 +541,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
 	dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
 			      + init->ring_size / sizeof(u32));
 	dev_priv->ring.size = init->ring_size;
-	dev_priv->ring.size_l2qw = get_order(init->ring_size / 8);
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
 
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
@@ -1202,7 +1202,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
 			      + init->ring_size / sizeof(u32));
 	dev_priv->ring.size = init->ring_size;
-	dev_priv->ring.size_l2qw = get_order(init->ring_size / 8);
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
 
 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
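In the two driver hunks above, size_l2qw is, by its name and the division by 8, the log2 of the ring size expressed in quad-words (8-byte units). As an illustrative check with assumed numbers, not taken from the commit: a 64 KiB ring gives init->ring_size / 8 = 8192, so drm_order(8192) = 13, whereas get_order(8192) on 4 KiB pages would be 1, yielding a wrong ring-size exponent.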