nv50: use same dma object for fb/tt access
We now depend fully on the VM for memory protection, so separate DMA objects for VRAM and GART are unnecessary. However, until the next interface break (soon), a client can't depend on the objects being the same and must still call the NV_OBJ_SET_DMA_* methods appropriately.
parent b9ed0f9950
commit 89cf2ee2e5
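Note on the compatibility point in the message above: until the interface break, a client keeps programming its VRAM and GART DMA handles through the NV_OBJ_SET_DMA_* methods exactly as before, even though on NV50 the kernel may now back both handles with the same object. The sketch below only illustrates that idea; push_method(), the method offsets and the handle values are hypothetical stand-ins, not the real method names or the libdrm API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical method offsets and handles, for illustration only. */
#define SET_DMA_FB 0x0184
#define SET_DMA_TT 0x0188

static void
push_method(uint32_t mthd, uint32_t handle)
{
	/* stand-in for emitting "method <- handle" into a pushbuffer */
	printf("mthd 0x%04x <- handle 0x%08x\n", mthd, handle);
}

int
main(void)
{
	uint32_t hDmaFB = 0xbeef0201;	/* hypothetical ctxdma handles */
	uint32_t hDmaTT = 0xbeef0202;

	/* A client still binds both handles; whether they alias the same
	 * DMA object underneath is now the kernel's business on NV50. */
	push_method(SET_DMA_FB, hDmaFB);
	push_method(SET_DMA_TT, hDmaTT);
	return 0;
}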
@@ -754,6 +754,7 @@ int
 nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_mem_alloc *alloc = data;
 	struct mem_block *block;
 
@@ -770,10 +771,15 @@ nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
 	alloc->offset=block->start;
 	alloc->flags=block->flags;
 
+	if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
+		alloc->offset += 512*1024*1024;
+
 	return 0;
 }
 
-int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int
+nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_mem_free *memfree = data;
@@ -781,6 +787,9 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 
+	if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
+		memfree->offset -= 512*1024*1024;
+
 	block=NULL;
 	if (memfree->flags & NOUVEAU_MEM_FB)
 		block = find_block(dev_priv->fb_heap, memfree->offset);
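The nouveau_ioctl_mem_alloc()/nouveau_ioctl_mem_free() hunks above bias NV50 VRAM offsets reported to userspace by 512MiB and strip the bias again on free; reading these together with the ctxdma hunks below, this matches the DMA object's base moving from 512MiB down to 0, so the returned offsets stay valid inside the single object. A minimal standalone sketch of that arithmetic follows; NV50_VRAM_BIAS and the helper names are illustrative, and the NOUVEAU_MEM_FB value is assumed.

#include <stdint.h>
#include <stdio.h>

#define NOUVEAU_MEM_FB	0x00000001		/* assumed flag value */
#define NV50_VRAM_BIAS	(512ULL * 1024 * 1024)	/* illustrative name */

/* what nouveau_ioctl_mem_alloc() now does before returning the offset */
static uint64_t
nv50_alloc_offset_to_user(uint64_t block_start, uint32_t flags)
{
	if (flags & NOUVEAU_MEM_FB)
		return block_start + NV50_VRAM_BIAS;
	return block_start;
}

/* what nouveau_ioctl_mem_free() now does before calling find_block() */
static uint64_t
nv50_free_offset_to_heap(uint64_t user_offset, uint32_t flags)
{
	if (flags & NOUVEAU_MEM_FB)
		return user_offset - NV50_VRAM_BIAS;
	return user_offset;
}

int
main(void)
{
	uint64_t user = nv50_alloc_offset_to_user(0x100000, NOUVEAU_MEM_FB);

	printf("user-visible offset: 0x%llx\n", (unsigned long long)user);
	printf("heap offset on free: 0x%llx\n",
	       (unsigned long long)nv50_free_offset_to_heap(user, NOUVEAU_MEM_FB));
	return 0;
}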
@@ -1036,8 +1036,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	/* VRAM ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     512*1024*1024,
-					     dev_priv->fb_available_size,
+					     0, 0x100000000ULL,
 					     NV_DMA_ACCESS_RW,
 					     NV_DMA_TARGET_AGP, &vram);
 		if (ret) {
@@ -1059,6 +1058,9 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	}
 
 	/* TT memory ctxdma */
+	if (dev_priv->card_type >= NV_50) {
+		tt = vram;
+	} else
 	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
 		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
 						  dev_priv->gart_info.aper_size,
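The nouveau_gpuobj_channel_init() hunks make the NV50 VRAM ctxdma cover a full 4GiB range starting at 0 (0, 0x100000000ULL) instead of a 512MiB-based window over fb_available_size, and drop the separate TT ctxdma by aliasing it to the same object (tt = vram). A simplified standalone sketch of that decision is below; the struct and variable names are stand-ins, not the real gpuobj code.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a DMA context object: just a base and a size. */
struct dma_obj {
	uint64_t base;
	uint64_t size;
};

int
main(void)
{
	int is_nv50 = 1;

	/* One object spanning the whole 4GiB channel address space... */
	struct dma_obj vram = { 0, 0x100000000ULL };

	/* ...which the "TT" object now simply aliases on NV50; pre-NV50
	 * cards would still get a distinct GART ctxdma (not modelled here). */
	struct dma_obj tt = is_nv50 ? vram : (struct dma_obj){ 0, 0 };

	printf("vram: base 0x%llx size 0x%llx\n",
	       (unsigned long long)vram.base, (unsigned long long)vram.size);
	printf("tt  : base 0x%llx size 0x%llx\n",
	       (unsigned long long)tt.base, (unsigned long long)tt.size);
	return 0;
}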