Merge git://proxy01.pd.intel.com:9419/git/mesa/drm into crestline

main
Nian Wu 2007-03-23 17:00:41 +08:00
commit e7cd5a1e2d
7 changed files with 100 additions and 132 deletions

View File

@@ -118,7 +118,6 @@ static int drm_irq_install(drm_device_t * dev)
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->tasklet_lock);
INIT_LIST_HEAD(&dev->vbl_sigs.head);
INIT_LIST_HEAD(&dev->vbl_sigs2.head);

View File

@@ -61,17 +61,17 @@ int drm_alloc_memctl(size_t size)
{
int ret;
unsigned long a_size = drm_size_align(size);
spin_lock(&drm_memctl.lock);
ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
-ENOMEM : 0;
if (!ret)
drm_memctl.cur_used += a_size;
spin_unlock(&drm_memctl.lock);
return ret;
}
EXPORT_SYMBOL(drm_alloc_memctl);
void drm_free_memctl(size_t size)
{
unsigned long a_size = drm_size_align(size);
@@ -84,14 +84,14 @@ EXPORT_SYMBOL(drm_free_memctl);
void drm_query_memctl(drm_u64_t *cur_used,
drm_u64_t *low_threshold,
drm_u64_t *high_threshold)
{
spin_lock(&drm_memctl.lock);
*cur_used = drm_memctl.cur_used;
*low_threshold = drm_memctl.low_threshold;
*high_threshold = drm_memctl.high_threshold;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_query_memctl);
void drm_init_memctl(size_t p_low_threshold,
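Aside: the hunks above are whitespace-only, but the drm_alloc_memctl()/drm_free_memctl() pair they touch is easy to misuse. A minimal usage sketch, with a made-up wrapper name (the real callers live elsewhere in drm_memory.c): reserve accounted space before allocating, and release the same size on every failure path.

#include "drmP.h"

/* Hypothetical caller of the memctl accounting pair; illustrative only. */
static void *example_ctl_alloc(size_t size)
{
	void *mem;

	if (drm_alloc_memctl(size))	/* would exceed high_threshold: -ENOMEM */
		return NULL;

	mem = kmalloc(size, GFP_KERNEL);
	if (!mem)
		drm_free_memctl(size);	/* undo the accounting on failure */
	return mem;
}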

View File

@@ -244,7 +244,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
(unsigned long) r_list->user_token);
if (map->mtrr < 0) {
@@ -438,7 +438,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
drm_buffer_manager_t *bm = &dev->bm;
drm_fence_manager_t *fm = &dev->fm;
drm_u64_t used_mem;
drm_u64_t low_mem;
drm_u64_t high_mem;
@@ -451,17 +451,17 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Object accounting:\n\n");
if (fm->initialized) {
DRM_PROC_PRINT("Number of active fence objects: %d.\n",
DRM_PROC_PRINT("Number of active fence objects: %d.\n",
atomic_read(&fm->count));
} else {
DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
}
if (bm->initialized) {
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
atomic_read(&bm->count));
}
DRM_PROC_PRINT("Memory accounting:\n\n");
@@ -473,16 +473,16 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
drm_query_memctl(&used_mem, &low_mem, &high_mem);
if (used_mem > 16*PAGE_SIZE) {
DRM_PROC_PRINT("Used object memory is %lu pages.\n",
(unsigned long) (used_mem >> PAGE_SHIFT));
} else {
DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
(unsigned long) used_mem);
}
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
(unsigned long) (low_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
(unsigned long) (high_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("\n");

View File

@@ -83,7 +83,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
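The change above only re-wraps the drm_mm_init() call, but the surrounding error path is worth spelling out: each failing step undoes exactly what succeeded before it. A sketch of the same unwind ordering written with goto labels; the function name and the hash-table order are illustrative, not taken from this tree.

#include "drmP.h"

/* Illustrative only: the teardown-in-reverse pattern from drm_fill_in_dev(). */
static int example_fill_in_dev(drm_device_t *dev)
{
	int ret;

	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
	if (dev->maplist == NULL)
		return -ENOMEM;

	ret = drm_ht_create(&dev->map_hash, 12);	/* order is illustrative */
	if (ret)
		goto out_free_maplist;

	ret = drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			  DRM_FILE_PAGE_OFFSET_SIZE);
	if (ret)
		goto out_remove_hash;
	return 0;

out_remove_hash:
	drm_ht_remove(&dev->map_hash);
out_free_maplist:
	drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
	return ret;
}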

View File

@@ -684,7 +684,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;
break;
case _DRM_TTM:
return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
@@ -732,13 +732,13 @@ EXPORT_SYMBOL(drm_mmap);
*/
#ifdef DRM_FULL_MM_COMPAT
static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
drm_ttm_t *ttm;
drm_device_t *dev;
unsigned long pfn;
int err;
@@ -746,10 +746,10 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long bus_offset;
unsigned long bus_size;
int ret = NOPFN_REFAULT;
if (address > vma->vm_end)
return NOPFN_SIGBUS;
err = mutex_lock_interruptible(&bo->mutex);
if (err)
return NOPFN_REFAULT;
@@ -766,8 +766,8 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
if (err) {
@@ -777,7 +777,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
}
dev = bo->dev;
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
@@ -804,7 +804,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
pfn = page_to_pfn(page);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
err = vm_insert_pfn(vma, address, pfn);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
@@ -903,6 +903,6 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma,
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_map_bound(vma);
#endif
return 0;
}
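For readers following the DRM_FULL_MM_COMPAT path, the handler above encodes the ->nopfn return contract, which is easy to lose in the whitespace noise. Reduced to a skeleton (illustrative names, not a drop-in replacement): an out-of-range fault is SIGBUS, an interrupted lock and a successfully inserted pfn both report REFAULT, and only a hard vm_insert_pfn() failure is OOM.

#include <linux/mm.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static unsigned long example_nopfn(struct vm_area_struct *vma,
				   unsigned long address)
{
	unsigned long pfn = 0;	/* the real handler derives this from the bo */
	int err;

	if (address > vma->vm_end)
		return NOPFN_SIGBUS;

	if (mutex_lock_interruptible(&example_mutex))
		return NOPFN_REFAULT;	/* interrupted: let the core retry */

	err = vm_insert_pfn(vma, address, pfn);
	mutex_unlock(&example_mutex);

	/* vm_insert_pfn() installed the pte on success, so REFAULT is the
	 * normal exit; -EAGAIN means a race, anything else is OOM. */
	return (err && err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
}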

View File

@@ -110,13 +110,6 @@ typedef struct drm_nouveau_private {
drm_local_map_t *fb;
drm_local_map_t *ramin; /* NV40 onwards */
-//TODO: Remove me, I'm bogus :)
-int cur_fifo;
-struct nouveau_object *fb_obj;
-int cmdbuf_ch_size;
-struct mem_block* cmdbuf_alloc;
int fifo_alloc_count;
struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER];

View File

@@ -273,24 +273,21 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
}
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val))
-static void nouveau_nv04_context_init(drm_device_t *dev,
-drm_nouveau_fifo_alloc_t *init)
+static void nouveau_nv04_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
uint32_t fifoctx, ctx_size = 32;
int i;
-cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj;
+cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
-fifoctx=NV_RAMIN+dev_priv->ramfc_offset+init->channel*ctx_size;
+fifoctx=NV_RAMIN+dev_priv->ramfc_offset+channel*ctx_size;
// clear the fifo context
for(i=0;i<ctx_size/4;i++)
NV_WRITE(fifoctx+4*i,0x0);
-RAMFC_WR(DMA_PUT , init->put_base);
-RAMFC_WR(DMA_GET , init->put_base);
RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, cb_obj->instance));
RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
@@ -304,15 +301,14 @@ static void nouveau_nv04_context_init(drm_device_t *dev,
#undef RAMFC_WR
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val))
-static void nouveau_nv10_context_init(drm_device_t *dev,
-drm_nouveau_fifo_alloc_t *init)
+static void nouveau_nv10_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
uint32_t fifoctx;
int i;
-cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj;
-fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*64;
+cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
+fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64;
for (i=0;i<64;i+=4)
NV_WRITE(fifoctx + i, 0);
@@ -321,8 +317,6 @@ static void nouveau_nv10_context_init(drm_device_t *dev,
* after channel's is put into DMA mode
*/
-RAMFC_WR(DMA_PUT , init->put_base);
-RAMFC_WR(DMA_GET , init->put_base);
RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev,
cb_obj->instance));
@@ -335,25 +329,22 @@ static void nouveau_nv10_context_init(drm_device_t *dev,
0x00000000);
}
-static void nouveau_nv30_context_init(drm_device_t *dev,
-drm_nouveau_fifo_alloc_t *init)
+static void nouveau_nv30_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
-struct nouveau_fifo *chan = &dev_priv->fifos[init->channel];
+struct nouveau_fifo *chan = &dev_priv->fifos[channel];
struct nouveau_object *cb_obj;
uint32_t fifoctx, grctx_inst, cb_inst, ctx_size = 64;
int i;
-cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj;
+cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
-fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel * ctx_size;
+fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel * ctx_size;
for (i = 0; i < ctx_size; i += 4)
NV_WRITE(fifoctx + i, 0);
-RAMFC_WR(DMA_PUT, init->put_base);
-RAMFC_WR(DMA_GET, init->put_base);
RAMFC_WR(REF_CNT, NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
RAMFC_WR(DMA_INSTANCE, cb_inst);
RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
@@ -371,8 +362,6 @@ static void nouveau_nv30_context_init(drm_device_t *dev,
RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
RAMFC_WR(ACQUIRE_TIMEOUT, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
RAMFC_WR(SEMAPHORE, NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
-RAMFC_WR(DMA_SUBROUTINE, init->put_base);
}
static void nouveau_nv10_context_save(drm_device_t *dev)
@@ -401,25 +390,22 @@ static void nouveau_nv10_context_save(drm_device_t *dev)
#undef RAMFC_WR
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val))
-static void nouveau_nv40_context_init(drm_device_t *dev,
-drm_nouveau_fifo_alloc_t *init)
+static void nouveau_nv40_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
-struct nouveau_fifo *chan = &dev_priv->fifos[init->channel];
+struct nouveau_fifo *chan = &dev_priv->fifos[channel];
uint32_t fifoctx, cb_inst, grctx_inst;
int i;
cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
-fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*128;
+fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128;
for (i=0;i<128;i+=4)
NV_WRITE(fifoctx + i, 0);
/* Fill entries that are seen filled in dumps of nvidia driver just
* after channel's is put into DMA mode
*/
-RAMFC_WR(DMA_PUT , init->put_base);
-RAMFC_WR(DMA_GET , init->put_base);
RAMFC_WR(DMA_INSTANCE , cb_inst);
RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -428,7 +414,6 @@ static void nouveau_nv40_context_init(drm_device_t *dev,
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
-RAMFC_WR(DMA_SUBROUTINE, init->put_base);
RAMFC_WR(GRCTX_INSTANCE, grctx_inst);
RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
}
@@ -503,12 +488,12 @@ nouveau_fifo_context_restore(drm_device_t *dev, int channel)
}
/* allocates and initializes a fifo for user space consumption */
-static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init, DRMFILE filp)
+static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
{
-int i;
int ret;
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
+int channel;
/*
* Alright, here is the full story
@@ -518,32 +503,29 @@ static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init,
* (woo, full userspace command submission !)
* When there are no more contexts, you lost
*/
-for(i=0;i<nouveau_fifo_number(dev);i++)
-if (dev_priv->fifos[i].used==0)
+for(channel=0; channel<nouveau_fifo_number(dev); channel++)
+if (dev_priv->fifos[channel].used==0)
break;
-DRM_INFO("Allocating FIFO number %d\n", i);
/* no more fifos. you lost. */
-if (i==nouveau_fifo_number(dev))
+if (channel==nouveau_fifo_number(dev))
return DRM_ERR(EINVAL);
+(*chan_ret) = channel;
+DRM_INFO("Allocating FIFO number %d\n", channel);
/* that fifo is used */
-dev_priv->fifos[i].used = 1;
-dev_priv->fifos[i].filp = filp;
+dev_priv->fifos[channel].used = 1;
+dev_priv->fifos[channel].filp = filp;
/* FIFO has no objects yet */
-dev_priv->fifos[i].objs = NULL;
+dev_priv->fifos[channel].objs = NULL;
/* allocate a command buffer, and create a dma object for the gpu */
-ret = nouveau_fifo_cmdbuf_alloc(dev, i);
+ret = nouveau_fifo_cmdbuf_alloc(dev, channel);
if (ret) {
-nouveau_fifo_free(dev, i);
+nouveau_fifo_free(dev, channel);
return ret;
}
-cb_obj = dev_priv->fifos[i].cmdbuf_obj;
-init->channel = i;
-init->put_base = 0;
-dev_priv->cur_fifo = init->channel;
+cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
nouveau_wait_for_idle(dev);
@@ -558,58 +540,58 @@ static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init,
{
case NV_04:
case NV_05:
-nv04_graph_context_create(dev, init->channel);
-nouveau_nv04_context_init(dev, init);
+nv04_graph_context_create(dev, channel);
+nouveau_nv04_context_init(dev, channel);
break;
case NV_10:
-nv10_graph_context_create(dev, init->channel);
-nouveau_nv10_context_init(dev, init);
+nv10_graph_context_create(dev, channel);
+nouveau_nv10_context_init(dev, channel);
break;
case NV_20:
-ret = nv20_graph_context_create(dev, init->channel);
+ret = nv20_graph_context_create(dev, channel);
if (ret) {
-nouveau_fifo_free(dev, init->channel);
+nouveau_fifo_free(dev, channel);
return ret;
}
-nouveau_nv10_context_init(dev, init);
+nouveau_nv10_context_init(dev, channel);
break;
case NV_30:
-ret = nv30_graph_context_create(dev, init->channel);
+ret = nv30_graph_context_create(dev, channel);
if (ret) {
-nouveau_fifo_free(dev, init->channel);
+nouveau_fifo_free(dev, channel);
return ret;
}
-nouveau_nv30_context_init(dev, init);
+nouveau_nv30_context_init(dev, channel);
break;
case NV_40:
case NV_44:
case NV_50:
-ret = nv40_graph_context_create(dev, init->channel);
+ret = nv40_graph_context_create(dev, channel);
if (ret) {
-nouveau_fifo_free(dev, init->channel);
+nouveau_fifo_free(dev, channel);
return ret;
}
-nouveau_nv40_context_init(dev, init);
+nouveau_nv40_context_init(dev, channel);
break;
}
/* enable the fifo dma operation */
-NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<init->channel));
+NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
/* setup channel's default get/put values */
-NV_WRITE(NV03_FIFO_REGS_DMAPUT(init->channel), init->put_base);
-NV_WRITE(NV03_FIFO_REGS_DMAGET(init->channel), init->put_base);
+NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), 0);
+NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), 0);
/* If this is the first channel, setup PFIFO ourselves. For any
* other case, the GPU will handle this when it switches contexts.
*/
if (dev_priv->fifo_alloc_count == 0) {
-nouveau_fifo_context_restore(dev, init->channel);
+nouveau_fifo_context_restore(dev, channel);
if (dev_priv->card_type >= NV_30) {
struct nouveau_fifo *chan;
uint32_t inst;
-chan = &dev_priv->fifos[init->channel];
+chan = &dev_priv->fifos[channel];
inst = nouveau_chip_instance_get(dev,
chan->ramin_grctx);
@@ -630,22 +612,9 @@ static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init,
/* reenable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
-/* make the fifo available to user space */
-/* first, the fifo control regs */
-init->ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init->channel);
-init->ctrl_size = NV03_FIFO_REGS_SIZE;
-ret = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
-0, &dev_priv->fifos[init->channel].regs);
-if (ret != 0)
-return ret;
-/* pass back FIFO map info to the caller */
-init->cmdbuf = dev_priv->fifos[init->channel].cmdbuf_mem->start;
-init->cmdbuf_size = dev_priv->fifos[init->channel].cmdbuf_mem->size;
dev_priv->fifo_alloc_count++;
-DRM_INFO("%s: initialised FIFO %d\n", __func__, init->channel);
+DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
return 0;
}
@@ -709,20 +678,6 @@ void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp)
for(i=0;i<nouveau_fifo_number(dev);i++)
if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp)
nouveau_fifo_free(dev,i);
-/* check we still point at an active channel */
-if (dev_priv->fifos[dev_priv->cur_fifo].used == 0) {
-DRM_DEBUG("%s: cur_fifo is no longer owned.\n", __func__);
-for (i=0;i<nouveau_fifo_number(dev);i++)
-if (dev_priv->fifos[i].used) break;
-if (i==nouveau_fifo_number(dev))
-i=0;
-DRM_DEBUG("%s: new cur_fifo is %d\n", __func__, i);
-dev_priv->cur_fifo = i;
-}
-/* if (dev_priv->cmdbuf_alloc)
-nouveau_fifo_init(dev);*/
}
int
@@ -744,15 +699,36 @@ nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel)
static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
+drm_nouveau_private_t *dev_priv = dev->dev_private;
drm_nouveau_fifo_alloc_t init;
int res;
-DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init));
-res=nouveau_fifo_alloc(dev,&init,filp);
-if (!res)
-DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init));
+DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data,
+sizeof(init));
-return res;
+res = nouveau_fifo_alloc(dev, &init.channel, filp);
+if (res)
+return res;
+/* this should probably disappear in the next abi break? */
+init.put_base = 0;
+/* make the fifo available to user space */
+/* first, the fifo control regs */
+init.ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel);
+init.ctrl_size = NV03_FIFO_REGS_SIZE;
+res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS,
+0, &dev_priv->fifos[init.channel].regs);
+if (res != 0)
+return res;
+/* pass back FIFO map info to the caller */
+init.cmdbuf = dev_priv->fifos[init.channel].cmdbuf_mem->start;
+init.cmdbuf_size = dev_priv->fifos[init.channel].cmdbuf_mem->size;
+DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data,
+init, sizeof(init));
return 0;
}
/***********************************
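With this split, nouveau_fifo_alloc() only picks and initialises a channel; the ioctl wrapper now owns the user-visible ABI (put_base pinned to 0, the control-register map, and the command buffer info copied back). From userspace the round trip might look like the sketch below. drmCommandWriteRead() is the stock libdrm entry point; the request name DRM_NOUVEAU_FIFO_ALLOC and the struct field types are assumed from the nouveau ABI of this era.

#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include "nouveau_drm.h"

/* Illustrative only: allocate a FIFO and report what the kernel returned. */
static int example_fifo_alloc(int fd)
{
	drm_nouveau_fifo_alloc_t init;
	int ret;

	memset(&init, 0, sizeof(init));
	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_FIFO_ALLOC,
				  &init, sizeof(init));
	if (ret)
		return ret;

	/* channel, put_base (now always 0), ctrl/ctrl_size and
	 * cmdbuf/cmdbuf_size come back filled in by the ioctl. */
	printf("channel %d: ctrl %#lx (%d bytes), cmdbuf %#lx (%d bytes)\n",
	       init.channel, (unsigned long)init.ctrl, init.ctrl_size,
	       (unsigned long)init.cmdbuf, init.cmdbuf_size);
	return 0;
}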