Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101
Conflicts:
	linux-core/drm_bo.c
	linux-core/drm_objects.h
	shared-core/i915_dma.c
	shared-core/i915_drv.h

commit 90bfc8e611
@@ -516,8 +516,11 @@ static int drm_load(drm_device_t *dev)
 	DRM_DEBUG( "\n" );
 
 	dev->irq = pci_get_irq(dev->device);
-	/* XXX Fix domain number (alpha hoses) */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700053
+	dev->pci_domain = pci_get_domain(dev->device);
+#else
 	dev->pci_domain = 0;
+#endif
 	dev->pci_bus = pci_get_bus(dev->device);
 	dev->pci_slot = pci_get_slot(dev->device);
 	dev->pci_func = pci_get_function(dev->device);
406	libdrm/xf86drm.c
@@ -2345,7 +2345,7 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
  * DRM_FENCE_MASK_DRIVER
  */
 
-int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
+int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
 		   drmFence *fence)
 {
     drm_fence_arg_t arg;
@@ -2353,11 +2353,12 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
     memset(&arg, 0, sizeof(arg));
     arg.flags = flags;
     arg.type = type;
-    arg.class = class;
+    arg.fence_class = fence_class;
 
     if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
 	return -errno;
     fence->handle = arg.handle;
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->flags = arg.flags;
     fence->signaled = 0;
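Note: the two hunks above are part of a tree-wide rename of the fence "class" field to "fence_class". A minimal sketch of a caller updated for the new drmFenceCreate() signature; the flag, class, and type values are illustrative placeholders, not values taken from this diff:

    /* Assumes an open DRM fd and the post-rename libdrm headers. */
    #include "xf86drm.h"
    #include "xf86mm.h"

    static int create_exe_fence(int fd, drmFence *fence)
    {
        /* fence_class 0 is the default class; DRM_FENCE_TYPE_EXE is the
         * basic "command stream executed" fence type. */
        return drmFenceCreate(fd, 0 /* flags */, 0 /* fence_class */,
                              DRM_FENCE_TYPE_EXE, fence);
    }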
@@ -2370,35 +2371,25 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
  * DRM_FENCE_MASK_DRIVER
  */
 
-int drmFenceBuffers(int fd, unsigned flags, drmFence *fence)
+int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence)
 {
     drm_fence_arg_t arg;
 
     memset(&arg, 0, sizeof(arg));
     arg.flags = flags;
+    arg.fence_class = fence_class;
 
     if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
 	return -errno;
     fence->handle = arg.handle;
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->flags = arg.flags;
+    fence->sequence = arg.sequence;
     fence->signaled = 0;
     return 0;
 }
 
-int drmFenceDestroy(int fd, const drmFence *fence)
-{
-    drm_fence_arg_t arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = fence->handle;
-
-    if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg))
-	return -errno;
-    return 0;
-}
-
 int drmFenceReference(int fd, unsigned handle, drmFence *fence)
 {
     drm_fence_arg_t arg;
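Note: drmFenceDestroy() is deleted above, and drmFenceBuffers() gains a fence_class parameter plus the new sequence field. A hedged sketch of the updated usage pattern (the flag name is assumed from the drm headers of this era); cleanup now goes through drmFenceUnreference():

    static int fence_unfenced_buffers(int fd)
    {
        drmFence fence;
        int ret;

        /* Fence everything on the unfenced list against class 0. */
        ret = drmFenceBuffers(fd, DRM_FENCE_FLAG_EMIT, 0 /* fence_class */, &fence);
        if (ret)
            return ret;
        /* ... hand fence.handle to whoever needs to wait on it ... */
        return drmFenceUnreference(fd, &fence);
    }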
@@ -2409,7 +2400,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
     if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
 	return -errno;
     fence->handle = arg.handle;
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->flags = arg.flags;
     fence->signaled = arg.signaled;
@@ -2438,7 +2429,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
 
     if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
 	return -errno;
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
     return 0;
@@ -2453,7 +2444,7 @@ int drmFenceUpdate(int fd, drmFence *fence)
 
     if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
 	return -errno;
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
     return 0;
@@ -2486,14 +2477,14 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
     drm_fence_arg_t arg;
 
     memset(&arg, 0, sizeof(arg));
-    arg.class = fence->class;
+    arg.fence_class = fence->fence_class;
     arg.flags = flags;
     arg.handle = fence->handle;
     arg.type = emit_type;
 
     if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
 	return -errno;
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
     return 0;
@@ -2532,150 +2523,12 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
     if (ret)
 	return -errno;
 
-    fence->class = arg.class;
+    fence->fence_class = arg.fence_class;
     fence->type = arg.type;
     fence->signaled = arg.signaled;
     return 0;
 }
 
-static int drmAdjustListNodes(drmBOList *list)
-{
-    drmBONode *node;
-    drmMMListHead *l;
-    int ret = 0;
-
-    while(list->numCurrent < list->numTarget) {
-	node = (drmBONode *) malloc(sizeof(*node));
-	if (!node) {
-	    ret = -ENOMEM;
-	    break;
-	}
-	list->numCurrent++;
-	DRMLISTADD(&node->head, &list->free);
-    }
-
-    while(list->numCurrent > list->numTarget) {
-	l = list->free.next;
-	if (l == &list->free)
-	    break;
-	DRMLISTDEL(l);
-	node = DRMLISTENTRY(drmBONode, l, head);
-	free(node);
-	list->numCurrent--;
-    }
-    return ret;
-}
-
-void drmBOFreeList(drmBOList *list)
-{
-    drmBONode *node;
-    drmMMListHead *l;
-
-    l = list->list.next;
-    while(l != &list->list) {
-	DRMLISTDEL(l);
-	node = DRMLISTENTRY(drmBONode, l, head);
-	free(node);
-	l = list->list.next;
-	list->numCurrent--;
-	list->numOnList--;
-    }
-
-    l = list->free.next;
-    while(l != &list->free) {
-	DRMLISTDEL(l);
-	node = DRMLISTENTRY(drmBONode, l, head);
-	free(node);
-	l = list->free.next;
-	list->numCurrent--;
-    }
-}
-
-int drmBOResetList(drmBOList *list)
-{
-    drmMMListHead *l;
-    int ret;
-
-    ret = drmAdjustListNodes(list);
-    if (ret)
-	return ret;
-
-    l = list->list.next;
-    while (l != &list->list) {
-	DRMLISTDEL(l);
-	DRMLISTADD(l, &list->free);
-	list->numOnList--;
-	l = list->list.next;
-    }
-    return drmAdjustListNodes(list);
-}
-
-static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
-				 unsigned long arg0,
-				 unsigned long arg1)
-{
-    drmBONode *node;
-    drmMMListHead *l;
-
-    l = list->free.next;
-    if (l == &list->free) {
-	node = (drmBONode *) malloc(sizeof(*node));
-	if (!node) {
-	    return NULL;
-	}
-	list->numCurrent++;
-    }
-    else {
-	DRMLISTDEL(l);
-	node = DRMLISTENTRY(drmBONode, l, head);
-    }
-    node->buf = item;
-    node->arg0 = arg0;
-    node->arg1 = arg1;
-    DRMLISTADD(&node->head, &list->list);
-    list->numOnList++;
-    return node;
-}
-
-void *drmBOListIterator(drmBOList *list)
-{
-    void *ret = list->list.next;
-
-    if (ret == &list->list)
-	return NULL;
-    return ret;
-}
-
-void *drmBOListNext(drmBOList *list, void *iterator)
-{
-    void *ret;
-
-    drmMMListHead *l = (drmMMListHead *) iterator;
-    ret = l->next;
-    if (ret == &list->list)
-	return NULL;
-    return ret;
-}
-
-drmBO *drmBOListBuf(void *iterator)
-{
-    drmBONode *node;
-    drmMMListHead *l = (drmMMListHead *) iterator;
-    node = DRMLISTENTRY(drmBONode, l, head);
-    return node->buf;
-}
-
-
-int drmBOCreateList(int numTarget, drmBOList *list)
-{
-    DRMINITLISTHEAD(&list->list);
-    DRMINITLISTHEAD(&list->free);
-    list->numTarget = numTarget;
-    list->numCurrent = 0;
-    list->numOnList = 0;
-    return drmAdjustListNodes(list);
-}
-
 static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
 {
     buf->handle = rep->handle;
@@ -2695,8 +2548,8 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
 
 
 
-int drmBOCreate(int fd, unsigned long start, unsigned long size,
-		unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
+int drmBOCreate(int fd, unsigned long size,
+		unsigned pageAlignment, void *user_buffer,
 		uint64_t mask,
 		unsigned hint, drmBO *buf)
 {
@@ -2710,26 +2563,11 @@ int drmBOCreate(int fd, unsigned long size,
     req->mask = mask;
     req->hint = hint;
     req->size = size;
-    req->type = type;
     req->page_alignment = pageAlignment;
+    req->buffer_start = (unsigned long) user_buffer;
 
     buf->virtual = NULL;
 
-    switch(type) {
-    case drm_bo_type_dc:
-	req->buffer_start = start;
-	break;
-    case drm_bo_type_user:
-	req->buffer_start = (unsigned long) user_buffer;
-	buf->virtual = user_buffer;
-	break;
-    case drm_bo_type_fake:
-	req->buffer_start = start;
-	break;
-    default:
-	return -EINVAL;
-    }
-
     do {
 	ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg);
     } while (ret != 0 && errno == EAGAIN);
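Note: with the start and type parameters gone, drmBOCreate() decides the buffer kind from user_buffer: NULL means an ordinary device buffer, non-NULL means a user buffer (the kernel side later in this diff derives drm_bo_type_user from a nonzero buffer_start). A sketch of both call styles under the new signature; the sizes and flag combinations are placeholders:

    drmBO bo;
    int ret;

    /* Device-memory buffer: no user pointer. */
    ret = drmBOCreate(fd, 1024 * 1024 /* size */, 0 /* pageAlignment */,
                      NULL /* user_buffer */,
                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
                      0 /* hint */, &bo);

    /* User buffer: wrap already-allocated, page-aligned memory. */
    ret = drmBOCreate(fd, size, 0, user_ptr, DRM_BO_FLAG_READ, 0, &bo);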
@@ -2744,26 +2582,6 @@ int drmBOCreate(int fd, unsigned long size,
     return 0;
 }
 
-int drmBODestroy(int fd, drmBO *buf)
-{
-    struct drm_bo_handle_arg arg;
-
-    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
-	(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
-	buf->mapVirtual = NULL;
-	buf->virtual = NULL;
-    }
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = buf->handle;
-
-    if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg))
-	return -errno;
-
-    buf->handle = 0;
-    return 0;
-}
-
 int drmBOReference(int fd, unsigned handle, drmBO *buf)
 {
     struct drm_bo_reference_info_arg arg;
@@ -2777,7 +2595,6 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
 	return -errno;
 
     drmBOCopyReply(rep, buf);
-    buf->type = drm_bo_type_dc;
     buf->mapVirtual = NULL;
     buf->mapCount = 0;
     buf->virtual = NULL;
@@ -2785,11 +2602,11 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
     return 0;
 }
 
-int drmBOUnReference(int fd, drmBO *buf)
+int drmBOUnreference(int fd, drmBO *buf)
 {
     struct drm_bo_handle_arg arg;
 
-    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
+    if (buf->mapVirtual) {
 	(void) munmap(buf->mapVirtual, buf->start + buf->size);
 	buf->mapVirtual = NULL;
 	buf->virtual = NULL;
@@ -2824,7 +2641,7 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
      * Make sure we have a virtual address of the buffer.
      */
 
-    if (!buf->virtual && buf->type != drm_bo_type_fake) {
+    if (!buf->virtual) {
 	drmAddress virtual;
 	virtual = mmap(0, buf->size + buf->start,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -2878,7 +2695,7 @@ int drmBOUnmap(int fd, drmBO *buf)
     return 0;
 }
 
-int drmBOValidate(int fd, drmBO *buf,
+int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class,
 		  uint64_t flags, uint64_t mask,
 		  unsigned hint)
 {
@@ -2892,7 +2709,7 @@ int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class,
     req->bo_req.flags = flags;
     req->bo_req.mask = mask;
     req->bo_req.hint = hint;
-    req->bo_req.fence_class = 0; /* Backwards compatibility. */
+    req->bo_req.fence_class = fence_class;
     req->op = drm_bo_validate;
 
     do{
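Note: drmBOValidate() now forwards the caller's fence_class instead of hard-coding 0. A one-line sketch of the updated call; the flag values are illustrative:

    /* Ask for TT placement, validating against fence class 0. */
    ret = drmBOValidate(fd, &bo, 0 /* fence_class */,
                        DRM_BO_FLAG_MEM_TT /* flags */,
                        DRM_BO_MASK_MEM /* mask */, 0 /* hint */);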
@@ -3016,185 +2833,6 @@ int drmBOBusy(int fd, drmBO *buf, int *busy)
     }
 }
 
-int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
-		       unsigned mask,
-		       int *newItem)
-{
-    drmBONode *node, *cur;
-    drmMMListHead *l;
-
-    *newItem = 0;
-    cur = NULL;
-
-    for (l = list->list.next; l != &list->list; l = l->next) {
-	node = DRMLISTENTRY(drmBONode, l, head);
-	if (node->buf == buf) {
-	    cur = node;
-	    break;
-	}
-    }
-    if (!cur) {
-	cur = drmAddListItem(list, buf, flags, mask);
-	if (!cur) {
-	    drmMsg("Out of memory creating validate list node.\n");
-	    return -ENOMEM;
-	}
-	*newItem = 1;
-	cur->arg0 = flags;
-	cur->arg1 = mask;
-    }
-    else {
-	unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
-	unsigned memFlags = cur->arg0 & flags & memMask;
-
-	if (!memFlags) {
-	    drmMsg("Incompatible memory location requests "
-		   "on validate list.\n");
-	    drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n",
-		   cur->arg0, cur->arg1);
-	    drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n",
-		   flags, mask);
-	    return -EINVAL;
-	}
-	if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
-	    drmMsg("Incompatible buffer flag requests "
-		   "on validate list.\n");
-	    drmMsg("Previous flag: 0x%08lx, mask: 0x%08lx\n",
-		   cur->arg0, cur->arg1);
-	    drmMsg("Current flag: 0x%08lx, mask: 0x%08lx\n",
-		   flags, mask);
-	    return -EINVAL;
-	}
-	cur->arg1 |= mask;
-	cur->arg0 = memFlags | ((cur->arg0 | flags) &
-				cur->arg1 & ~DRM_BO_MASK_MEM);
-    }
-    return 0;
-}
-
-
-int drmBOValidateList(int fd, drmBOList *list)
-{
-    drmBONode *node;
-    drmMMListHead *l;
-    struct drm_bo_op_arg *arg, *first;
-    struct drm_bo_op_req *req;
-    struct drm_bo_arg_rep *rep;
-    uint64_t *prevNext = NULL;
-    drmBO *buf;
-    int ret;
-
-    first = NULL;
-
-    for (l = list->list.next; l != &list->list; l = l->next) {
-	node = DRMLISTENTRY(drmBONode, l, head);
-
-	arg = &node->bo_arg;
-	req = &arg->d.req;
-
-	if (!first)
-	    first = arg;
-
-	if (prevNext)
-	    *prevNext = (unsigned long) arg;
-
-	memset(arg, 0, sizeof(*arg));
-	prevNext = &arg->next;
-	req->bo_req.handle = node->buf->handle;
-	req->op = drm_bo_validate;
-	req->bo_req.flags = node->arg0;
-	req->bo_req.hint = 0;
-	req->bo_req.mask = node->arg1;
-	req->bo_req.fence_class = 0; /* Backwards compat. */
-    }
-
-    if (!first)
-	return 0;
-
-    do{
-	ret = ioctl(fd, DRM_IOCTL_BO_OP, first);
-    } while (ret && errno == EAGAIN);
-
-    if (ret)
-	return -errno;
-
-    for (l = list->list.next; l != &list->list; l = l->next) {
-	node = DRMLISTENTRY(drmBONode, l, head);
-	arg = &node->bo_arg;
-	rep = &arg->d.rep;
-
-	if (!arg->handled) {
-	    drmMsg("Unhandled request\n");
-	    return -EFAULT;
-	}
-	if (rep->ret)
-	    return rep->ret;
-
-	buf = node->buf;
-	drmBOCopyReply(&rep->bo_info, buf);
-    }
-
-    return 0;
-}
-
-int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
-{
-    drmBONode *node;
-    drmMMListHead *l;
-    struct drm_bo_op_arg *arg, *first;
-    struct drm_bo_op_req *req;
-    struct drm_bo_arg_rep *rep;
-    uint64_t *prevNext = NULL;
-    drmBO *buf;
-    unsigned fence_flags;
-    int ret;
-
-    first = NULL;
-
-    for (l = list->list.next; l != &list->list; l = l->next) {
-	node = DRMLISTENTRY(drmBONode, l, head);
-
-	arg = &node->bo_arg;
-	req = &arg->d.req;
-
-	if (!first)
-	    first = arg;
-
-	if (prevNext)
-	    *prevNext = (unsigned long) arg;
-
-	memset(arg, 0, sizeof(*arg));
-	prevNext = &arg->next;
-	req->bo_req.handle = node->buf->handle;
-	req->op = drm_bo_fence;
-	req->bo_req.mask = node->arg0;
-	req->arg_handle = fenceHandle;
-    }
-
-    if (!first)
-	return 0;
-
-    ret = ioctl(fd, DRM_IOCTL_BO_OP, first);
-
-    if (ret)
-	return -errno;
-
-    for (l = list->list.next; l != &list->list; l = l->next) {
-	node = DRMLISTENTRY(drmBONode, l, head);
-
-	arg = &node->bo_arg;
-	rep = &arg->d.rep;
-
-	if (!arg->handled)
-	    return -EFAULT;
-	if (rep->ret)
-	    return rep->ret;
-	drmBOCopyReply(&rep->bo_info, node->buf);
-    }
-
-    return 0;
-}
-
 int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
 	      unsigned memType)
 {
@@ -96,16 +96,16 @@ typedef struct _drmMMListHead
 typedef struct _drmFence
 {
     unsigned handle;
-    int class;
+    int fence_class;
     unsigned type;
     unsigned flags;
     unsigned signaled;
+    uint32_t sequence;
     unsigned pad[4]; /* for future expansion */
 } drmFence;
 
 typedef struct _drmBO
 {
-    drm_bo_type_t type;
     unsigned handle;
     uint64_t mapHandle;
     uint64_t flags;
@@ -126,31 +126,12 @@ typedef struct _drmBO
     unsigned pad[8];   /* for future expansion */
 } drmBO;
 
-typedef struct _drmBONode
-{
-    drmMMListHead head;
-    drmBO *buf;
-    struct drm_bo_op_arg bo_arg;
-    unsigned long arg0;
-    unsigned long arg1;
-} drmBONode;
-
-typedef struct _drmBOList {
-    unsigned numTarget;
-    unsigned numCurrent;
-    unsigned numOnList;
-    drmMMListHead list;
-    drmMMListHead free;
-} drmBOList;
-
-
 /*
  * Fence functions.
  */
 
-extern int drmFenceCreate(int fd, unsigned flags, int class,
+extern int drmFenceCreate(int fd, unsigned flags, int fence_class,
 			  unsigned type, drmFence *fence);
-extern int drmFenceDestroy(int fd, const drmFence *fence);
 extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
 extern int drmFenceUnreference(int fd, const drmFence *fence);
 extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
@@ -160,46 +141,28 @@ extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
 			unsigned flush_type);
 extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
 			unsigned emit_type);
-extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
+extern int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence);
 
 
-/*
- * Buffer object list functions.
- */
-
-extern void drmBOFreeList(drmBOList *list);
-extern int drmBOResetList(drmBOList *list);
-extern void *drmBOListIterator(drmBOList *list);
-extern void *drmBOListNext(drmBOList *list, void *iterator);
-extern drmBO *drmBOListBuf(void *iterator);
-extern int drmBOCreateList(int numTarget, drmBOList *list);
-
 /*
  * Buffer object functions.
  */
 
-extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
-		       unsigned pageAlignment, void *user_buffer,
-		       drm_bo_type_t type, uint64_t mask,
-		       unsigned hint, drmBO *buf);
-extern int drmBODestroy(int fd, drmBO *buf);
+extern int drmBOCreate(int fd, unsigned long size,
+		       unsigned pageAlignment, void *user_buffer,
+		       uint64_t mask, unsigned hint, drmBO *buf);
 extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
-extern int drmBOUnReference(int fd, drmBO *buf);
+extern int drmBOUnreference(int fd, drmBO *buf);
 extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
 		    void **address);
 extern int drmBOUnmap(int fd, drmBO *buf);
-extern int drmBOValidate(int fd, drmBO *buf, uint64_t flags,
+extern int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class, uint64_t flags,
 			 uint64_t mask, unsigned hint);
 
 extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
 extern int drmBOInfo(int fd, drmBO *buf);
 extern int drmBOBusy(int fd, drmBO *buf, int *busy);
 
-extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
-			      unsigned mask,
-			      int *newItem);
-extern int drmBOValidateList(int fd, drmBOList *list);
-extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
 extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
 int drmBOSetPin(int fd, drmBO *buf, int pin);
@@ -163,7 +163,7 @@ endif
 all: modules
 
 modules: includes
-	make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+	+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
 
 ifeq ($(HEADERFROMBOOT),1)
 
@@ -269,7 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
 ifneq ($(PAGE_AGP),0)
 EXTRA_CFLAGS += -DHAVE_PAGE_AGP
 endif
-EXTRA_CFLAGS += -g -O0
+EXTRA_CFLAGS += -g
 
 # Start with all modules turned off.
 CONFIG_DRM_GAMMA := n
@@ -23,13 +23,13 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
 		i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
 		intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
-		nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
 		nouveau_sgdma.o nouveau_dma.o \
 		nv04_timer.o \
 		nv04_mc.o nv40_mc.o nv50_mc.o \
 		nv04_fb.o nv10_fb.o nv40_fb.o \
 		nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
-		nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
+		nv04_graph.o nv10_graph.o nv20_graph.o \
 		nv40_graph.o nv50_graph.o \
 		nv04_instmem.o nv50_instmem.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
@@ -430,7 +430,6 @@ struct drm_file {
 	 */
 
 	struct list_head refd_objects;
-	struct list_head user_objects;
 
 	struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
 	struct file *filp;
@@ -535,8 +535,7 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages,
 }
 
 static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
-			    unsigned long offset,
-			    int cached)
+			    struct drm_bo_mem_reg *bo_mem)
 {
 	struct drm_agp_ttm_backend *agp_be =
 		container_of(backend, struct drm_agp_ttm_backend, backend);
@@ -545,13 +544,14 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
 
 	DRM_DEBUG("drm_agp_bind_ttm\n");
 	mem->is_flushed = TRUE;
-	mem->type = (cached) ? AGP_USER_CACHED_MEMORY :
+	mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY :
 		AGP_USER_MEMORY;
-	ret = drm_agp_bind_memory(mem, offset);
+	ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
 	if (ret) {
 		DRM_ERROR("AGP Bind memory failed\n");
 	}
-	DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
+	DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
+			DRM_BE_FLAG_BOUND_CACHED : 0,
 			DRM_BE_FLAG_BOUND_CACHED);
 	return ret;
 }
@@ -643,7 +643,8 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
 	agp_be->bridge = dev->agp->bridge;
 	agp_be->populated = FALSE;
 	agp_be->backend.func = &agp_ttm_backend;
-	agp_be->backend.mem_type = DRM_BO_MEM_TT;
+//	agp_be->backend.mem_type = DRM_BO_MEM_TT;
+	agp_be->backend.dev = dev;
 
 	return &agp_be->backend;
 }
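Note: the TTM backend bind hook now receives the whole struct drm_bo_mem_reg instead of a separate (offset, cached) pair, so a backend reads placement and caching from one struct. A rough sketch of what a driver backend's bind callback looks like after this change, modeled on the AGP backend above — example_bind_ttm and my_hw_bind are hypothetical names, not functions from this tree:

    static int example_bind_ttm(struct drm_ttm_backend *backend,
                                struct drm_bo_mem_reg *bo_mem)
    {
            int cached = !!(bo_mem->flags & DRM_BO_FLAG_CACHED);
            unsigned long offset = bo_mem->mm_node->start;

            /* Program the aperture at 'offset' with the right caching mode. */
            return my_hw_bind(backend, offset, cached);
    }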
@@ -142,17 +142,12 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 
 	switch (bo->type) {
 	case drm_bo_type_dc:
-		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
-		if (!bo->ttm)
-			ret = -ENOMEM;
-		break;
 	case drm_bo_type_kernel:
-		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
 		if (!bo->ttm)
 			ret = -ENOMEM;
 		break;
 	case drm_bo_type_user:
-	case drm_bo_type_fake:
 		break;
 	default:
 		DRM_ERROR("Illegal buffer object type\n");
@@ -175,7 +170,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
 	int ret = 0;
 
-	if (old_is_pci || new_is_pci)
+	if (old_is_pci || new_is_pci ||
+	    ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
 		ret = drm_bo_vm_pre_move(bo, old_is_pci);
 	if (ret)
 		return ret;
@@ -190,9 +186,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 			goto out_err;
 
 		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-			ret = drm_bind_ttm(bo->ttm, new_man->flags &
-					   DRM_BO_FLAG_CACHED,
-					   mem->mm_node->start);
+			ret = drm_bind_ttm(bo->ttm, mem);
 			if (ret)
 				goto out_err;
 		}
@@ -242,7 +236,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 				_DRM_BO_FLAG_EVICTED);
 
 	if (bo->mem.mm_node)
-		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+			bm->man[bo->mem.mem_type].gpu_offset;
+
 
 	return 0;
 
@@ -290,6 +286,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_bo_wait);
 
 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
 {
@@ -531,38 +528,76 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
 
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+	struct drm_buffer_manager *bm = &dev->bm;
+	struct list_head *list = &bm->unfenced;
+	struct drm_buffer_object *entry, *next;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(entry, next, list, lru) {
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+
+		mutex_lock(&entry->mutex);
+		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+		mutex_lock(&dev->struct_mutex);
+
+		list_del_init(&entry->lru);
+		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		DRM_WAKEUP(&entry->event_queue);
+
+		/*
+		 * FIXME: Might want to put back on head of list
+		 * instead of tail here.
+		 */
+
+		drm_bo_add_to_lru(entry);
+		mutex_unlock(&entry->mutex);
+		drm_bo_usage_deref_locked(&entry);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
+
 /*
  * Note. The caller has to register (if applicable)
  * and deregister fence object usage.
 */
 
-int drm_fence_buffer_objects(struct drm_file * file_priv,
+int drm_fence_buffer_objects(struct drm_device *dev,
 			     struct list_head *list,
 			     uint32_t fence_flags,
 			     struct drm_fence_object * fence,
 			     struct drm_fence_object ** used_fence)
 {
-	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_manager *bm = &dev->bm;
 
 	struct drm_buffer_object *entry;
 	uint32_t fence_type = 0;
+	uint32_t fence_class = ~0;
 	int count = 0;
 	int ret = 0;
 	struct list_head *l;
-	LIST_HEAD(f_list);
 
 	mutex_lock(&dev->struct_mutex);
 
 	if (!list)
 		list = &bm->unfenced;
 
+	if (fence)
+		fence_class = fence->fence_class;
+
 	list_for_each_entry(entry, list, lru) {
 		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-		fence_type |= entry->fence_type;
-		if (entry->fence_class != 0) {
-			DRM_ERROR("Fence class %d is not implemented yet.\n",
-				  entry->fence_class);
+		fence_type |= entry->new_fence_type;
+		if (fence_class == ~0)
+			fence_class = entry->new_fence_class;
+		else if (entry->new_fence_class != fence_class) {
+			DRM_ERROR("Unmatching fence classes on unfenced list: "
+				  "%d and %d.\n",
+				  fence_class,
+				  entry->new_fence_class);
 			ret = -EINVAL;
 			goto out;
 		}
@@ -574,16 +609,9 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		goto out;
 	}
 
-	/*
-	 * Transfer to a local list before we release the dev->struct_mutex;
-	 * This is so we don't get any new unfenced objects while fencing
-	 * the ones we already have..
-	 */
-
-	list_splice_init(list, &f_list);
-
 	if (fence) {
-		if ((fence_type & fence->type) != fence_type) {
+		if ((fence_type & fence->type) != fence_type ||
+		    (fence->fence_class != fence_class)) {
 			DRM_ERROR("Given fence doesn't match buffers "
 				  "on unfenced list.\n");
 			ret = -EINVAL;
@@ -591,7 +619,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		}
 	} else {
 		mutex_unlock(&dev->struct_mutex);
-		ret = drm_fence_object_create(dev, 0, fence_type,
+		ret = drm_fence_object_create(dev, fence_class, fence_type,
 					      fence_flags | DRM_FENCE_FLAG_EMIT,
 					      &fence);
 		mutex_lock(&dev->struct_mutex);
@@ -600,8 +628,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 	}
 
 	count = 0;
-	l = f_list.next;
-	while (l != &f_list) {
+	l = list->next;
+	while (l != list) {
 		prefetch(l->next);
 		entry = list_entry(l, struct drm_buffer_object, lru);
 		atomic_inc(&entry->usage);
@@ -609,11 +637,14 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		mutex_lock(&entry->mutex);
 		mutex_lock(&dev->struct_mutex);
 		list_del_init(l);
-		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED &&
+		    entry->fence_class == fence_class) {
 			count++;
 			if (entry->fence)
 				drm_fence_usage_deref_locked(&entry->fence);
 			entry->fence = drm_fence_reference_locked(fence);
+			entry->fence_class = entry->new_fence_class;
+			entry->fence_type = entry->new_fence_type;
 			DRM_FLAG_MASKED(entry->priv_flags, 0,
 					_DRM_BO_FLAG_UNFENCED);
 			DRM_WAKEUP(&entry->event_queue);
@@ -621,7 +652,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		}
 		mutex_unlock(&entry->mutex);
 		drm_bo_usage_deref_locked(&entry);
-		l = f_list.next;
+		l = list->next;
 	}
 	DRM_DEBUG("Fenced %d buffers\n", count);
       out:
@@ -629,7 +660,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 	*used_fence = fence;
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_fence_buffer_objects);
 
 /*
@@ -663,12 +693,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	if (bo->type == drm_bo_type_fake) {
-		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-		bo->mem.mm_node = NULL;
-		goto out1;
-	}
-
 	evict_mem = bo->mem;
 	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
 	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
@@ -688,7 +712,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 		goto out;
 	}
 
-      out1:
 	mutex_lock(&dev->struct_mutex);
 	if (evict_mem.mm_node) {
 		if (evict_mem.mm_node != bo->pinned_node)
@@ -944,6 +967,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
 	atomic_inc(&bo->usage);
 	return bo;
 }
+EXPORT_SYMBOL(drm_lookup_buffer_object);
 
 /*
  * Call bo->mutex locked.
@@ -1079,9 +1103,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
 				struct drm_bo_info_rep *rep)
 {
+	if (!rep)
+		return;
+
 	rep->handle = bo->base.hash.key;
 	rep->flags = bo->mem.flags;
-	rep->size = bo->mem.num_pages * PAGE_SIZE;
+	rep->size = bo->num_pages * PAGE_SIZE;
 	rep->offset = bo->offset;
 	rep->arg_handle = bo->map_list.user_token;
 	rep->mask = bo->mem.mask;
@@ -1260,7 +1287,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
 	if (ret)
 		return ret;
 
-	mem.num_pages = bo->mem.num_pages;
+	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.mask = new_mem_flags;
 	mem.page_alignment = bo->mem.page_alignment;
@@ -1308,7 +1335,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
 		return 0;
 	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
 	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
 		return 0;
 	}
@@ -1319,44 +1346,6 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 	return 1;
 }
 
-static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
-{
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_mem_type_manager *man;
-	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
-	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
-	uint32_t i;
-	int type_ok = 0;
-	uint32_t mem_type = 0;
-	uint32_t cur_flags;
-
-	if (drm_bo_mem_compat(mem))
-		return 0;
-
-	BUG_ON(mem->mm_node);
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
-		man = &bm->man[mem_type];
-		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
-					       &cur_flags);
-		if (type_ok)
-			break;
-	}
-
-	if (type_ok) {
-		mem->mm_node = NULL;
-		mem->mem_type = mem_type;
-		mem->flags = cur_flags;
-		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
-		return 0;
-	}
-
-	DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
-		  (unsigned long long) mem->mask);
-	return -EINVAL;
-}
-
 /*
  * bo locked.
 */
@@ -1375,7 +1364,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 		  (unsigned long long) bo->mem.mask,
 		  (unsigned long long) bo->mem.flags);
 
-	ret = driver->fence_type(bo, &ftype);
+	ret = driver->fence_type(bo, &fence_class, &ftype);
 
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
@@ -1405,15 +1394,12 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 
 	}
 
-	bo->fence_class = fence_class;
-	bo->fence_type = ftype;
-	ret = drm_bo_wait_unmapped(bo, no_wait);
-	if (ret)
-		return ret;
-
-	if (bo->type == drm_bo_type_fake) {
-		ret = drm_bo_check_fake(dev, &bo->mem);
-		if (ret)
-			return ret;
+	bo->new_fence_class = fence_class;
+	bo->new_fence_type = ftype;
+
+	ret = drm_bo_wait_unmapped(bo, no_wait);
+	if (ret) {
+		DRM_ERROR("Timed out waiting for buffer unmap.\n");
+		return ret;
 	}
 
@@ -1465,11 +1451,45 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	return 0;
 }
 
-static int drm_bo_handle_validate(struct drm_file *file_priv,
-				  uint32_t handle,
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+		       uint64_t flags, uint64_t mask, uint32_t hint,
+		       uint32_t fence_class,
+		       int no_wait,
+		       struct drm_bo_info_rep *rep)
+{
+	int ret;
+
+	mutex_lock(&bo->mutex);
+	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+
+	if (ret)
+		goto out;
+
+
+	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
+	ret = drm_bo_new_mask(bo, flags, hint);
+	if (ret)
+		goto out;
+
+	ret = drm_buffer_object_validate(bo,
+					 fence_class,
+					 !(hint & DRM_BO_HINT_DONT_FENCE),
+					 no_wait);
+out:
+	if (rep)
+		drm_bo_fill_rep_arg(bo, rep);
+
+	mutex_unlock(&bo->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
 			   uint32_t fence_class,
 			   uint64_t flags, uint64_t mask, uint32_t hint,
-			   struct drm_bo_info_rep *rep)
+			   struct drm_bo_info_rep * rep,
+			   struct drm_buffer_object **bo_rep)
 {
 	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_object *bo;
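Note: drm_bo_handle_validate() is now a thin wrapper around the newly exported drm_bo_do_validate(), and can optionally return the looked-up buffer object through bo_rep. A hedged sketch of an in-kernel caller that keeps the reference (all locals illustrative):

    struct drm_buffer_object *bo;
    struct drm_bo_info_rep rep;
    int ret;

    ret = drm_bo_handle_validate(file_priv, handle, fence_class,
                                 flags, mask, hint, &rep, &bo);
    if (ret)
            return ret;
    /* ... use bo; drop the reference once done with it ... */
    drm_bo_usage_deref_unlocked(&bo);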
@@ -1479,34 +1499,22 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
 	mutex_lock(&dev->struct_mutex);
 	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!bo) {
 		return -EINVAL;
 	}
 
-	mutex_lock(&bo->mutex);
-	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-
-	if (ret)
-		goto out;
-
-	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-	ret = drm_bo_new_mask(bo, flags, hint);
-	if (ret)
-		goto out;
-
-	ret =
-	    drm_buffer_object_validate(bo, fence_class,
-				       !(hint & DRM_BO_HINT_DONT_FENCE),
-				       no_wait);
-	drm_bo_fill_rep_arg(bo, rep);
-
-      out:
-
-	mutex_unlock(&bo->mutex);
+	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+				 no_wait, rep);
 
-	drm_bo_usage_deref_unlocked(&bo);
+	if (!ret && bo_rep)
+		*bo_rep = bo;
+	else
+		drm_bo_usage_deref_unlocked(&bo);
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_bo_handle_validate);
 
 /**
  * Fills out the generic buffer object ioctl reply with the information for
@@ -1582,7 +1590,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 	int ret = 0;
 	unsigned long num_pages;
 
-	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
+	if (buffer_start & ~PAGE_MASK) {
 		DRM_ERROR("Invalid buffer object start.\n");
 		return -EINVAL;
 	}
@@ -1611,17 +1619,16 @@ int drm_buffer_object_create(struct drm_device *dev,
 	INIT_LIST_HEAD(&bo->vma_list);
 #endif
 	bo->dev = dev;
+	if (buffer_start != 0)
+		bo->type = drm_bo_type_user;
+	else
 	bo->type = type;
+	bo->num_pages = num_pages;
 	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-	bo->mem.num_pages = num_pages;
+	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	if (bo->type == drm_bo_type_fake) {
-		bo->offset = buffer_start;
-		bo->buffer_start = 0;
-	} else {
-		bo->buffer_start = buffer_start;
-	}
+	bo->buffer_start = buffer_start;
 	bo->priv_flags = 0;
 	bo->mem.flags = 0ULL;
 	bo->mem.mask = 0ULL;
@@ -1640,18 +1647,12 @@ int drm_buffer_object_create(struct drm_device *dev,
 	}
 
 	bo->fence_class = 0;
-	ret = driver->fence_type(bo, &bo->fence_type);
+	ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
 		goto out_err;
 	}
 
-	if (bo->type == drm_bo_type_fake) {
-		ret = drm_bo_check_fake(dev, &bo->mem);
-		if (ret)
-			goto out_err;
-	}
-
 	ret = drm_bo_add_ttm(bo);
 	if (ret)
 		goto out_err;
@@ -1672,7 +1673,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_buffer_object_create);
 
-int drm_bo_add_user_object(struct drm_file *file_priv,
+static int drm_bo_add_user_object(struct drm_file *file_priv,
 			   struct drm_buffer_object *bo, int shareable)
 {
 	struct drm_device *dev = file_priv->head->dev;
@@ -1692,7 +1693,6 @@ static int drm_bo_add_user_object(struct drm_file *file_priv,
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(drm_bo_add_user_object);
 
 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
 {
@@ -1742,7 +1742,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 						   req->bo_req.flags,
 						   req->bo_req.mask,
 						   req->bo_req.hint,
-						   &rep);
+						   &rep, NULL);
 			break;
 		case drm_bo_fence:
 			ret = -EINVAL;
@@ -1784,18 +1784,16 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_buffer_object *entry;
 	int ret = 0;
 
-	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
-		  (int)(req->size / 1024), req->page_alignment * 4, req->type);
+	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
+		  (int)(req->size / 1024), req->page_alignment * 4);
 
 	if (!dev->bm.initialized) {
 		DRM_ERROR("Buffer object manager is not initialized.\n");
 		return -EINVAL;
 	}
-	if (req->type == drm_bo_type_fake)
-		LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	ret = drm_buffer_object_create(file_priv->head->dev,
-				       req->size, req->type, req->mask,
+				       req->size, drm_bo_type_dc, req->mask,
 				       req->hint, req->page_alignment,
 				       req->buffer_start, &entry);
 	if (ret)
@@ -1816,32 +1814,6 @@ out:
 	return ret;
 }
 
-
-int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	struct drm_user_object *uo;
-	int ret = 0;
-
-	DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(file_priv, arg->handle);
-	if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	ret = drm_remove_user_object(file_priv, uo);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_bo_map_wait_idle_arg *arg = data;
@ -2093,9 +2065,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev)
|
||||||
struct drm_buffer_manager *bm = &dev->bm;
|
struct drm_buffer_manager *bm = &dev->bm;
|
||||||
struct list_head *head, *list;
|
struct list_head *head, *list;
|
||||||
struct drm_buffer_object *entry;
|
struct drm_buffer_object *entry;
|
||||||
|
struct drm_fence_object *fence;
|
||||||
|
|
||||||
head = &bm->unfenced;
|
head = &bm->unfenced;
|
||||||
|
|
||||||
|
if (list_empty(head))
|
||||||
|
return;
|
||||||
|
|
||||||
|
DRM_ERROR("Clean unfenced\n");
|
||||||
|
|
||||||
|
if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Fixme: Should really wait here.
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fence)
|
||||||
|
drm_fence_usage_deref_locked(&fence);
|
||||||
|
|
||||||
|
if (list_empty(head))
|
||||||
|
return;
|
||||||
|
|
||||||
|
DRM_ERROR("Really clean unfenced\n");
|
||||||
|
|
||||||
list = head->next;
|
list = head->next;
|
||||||
while(list != head) {
|
while(list != head) {
|
||||||
prefetch(list->next);
|
prefetch(list->next);
|
||||||
|
@ -2255,7 +2248,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
|
||||||
|
|
||||||
if (!man->has_type) {
|
if (!man->has_type) {
|
||||||
DRM_ERROR("Trying to take down uninitialized "
|
DRM_ERROR("Trying to take down uninitialized "
|
||||||
"memory manager type\n");
|
"memory manager type %u\n", mem_type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
man->use_type = 0;
|
man->use_type = 0;
|
||||||

@@ -71,9 +71,7 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
 		save_flags = old_mem->flags;
 	}
 	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-		ret = drm_bind_ttm(ttm,
-				   new_mem->flags & DRM_BO_FLAG_CACHED,
-				   new_mem->mm_node->start);
+		ret = drm_bind_ttm(ttm, new_mem);
 		if (ret)
 			return ret;
 	}
@@ -345,6 +343,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 		ret = drm_fence_object_create(dev, fence_class, fence_type,
 					      fence_flags | DRM_FENCE_FLAG_EMIT,
 					      &bo->fence);
+		bo->fence_type = fence_type;
 		if (ret)
 			return ret;

@@ -411,3 +410,194 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 }

 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
+
+int drm_bo_same_page(unsigned long offset,
+		     unsigned long offset2)
+{
+	return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
+}
+EXPORT_SYMBOL(drm_bo_same_page);
+
+unsigned long drm_bo_offset_end(unsigned long offset,
+				unsigned long end)
+{
+
+	offset = (offset + PAGE_SIZE) & PAGE_MASK;
+	return (end < offset) ? end : offset;
+}
+EXPORT_SYMBOL(drm_bo_offset_end);
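drm_bo_same_page() and drm_bo_offset_end() let copy and fixup loops advance through a buffer in page-sized chunks. A minimal sketch of the intended calling pattern (hypothetical caller, not part of this change):

	/* Hypothetical walk over [offset, end): drm_bo_offset_end() returns
	 * the next page boundary, clamped to 'end', so each iteration is
	 * guaranteed to stay within a single page. */
	static void example_walk(unsigned long offset, unsigned long end)
	{
		while (offset < end) {
			unsigned long next = drm_bo_offset_end(offset, end);

			/* ... process bytes [offset, next) here ... */
			offset = next;
		}
	}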
+
+
+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
+{
+	pgprot_t tmp = PAGE_KERNEL;
+
+#if defined(__i386__) || defined(__x86_64__)
+#ifdef USE_PAT_WC
+#warning using pat
+	if (drm_use_pat() && map_type == _DRM_TTM) {
+		pgprot_val(tmp) |= _PAGE_PAT;
+		return tmp;
+	}
+#endif
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+	if (map_type == _DRM_TTM)
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
+			  unsigned long bus_offset, unsigned long bus_size,
+			  struct drm_bo_kmap_obj *map)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg *mem = &bo->mem;
+	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+
+	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+		map->bo_kmap_type = bo_map_premapped;
+		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+	} else {
+		map->bo_kmap_type = bo_map_iomap;
+		map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
+			   unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg *mem = &bo->mem;
+	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+	pgprot_t prot;
+	struct drm_ttm *ttm = bo->ttm;
+	struct page *d;
+	int i;
+
+	BUG_ON(!ttm);
+
+	if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
+
+		/*
+		 * We're mapping a single page, and the desired
+		 * page protection is consistent with the bo.
+		 */
+
+		map->bo_kmap_type = bo_map_kmap;
+		map->page = drm_ttm_get_page(ttm, start_page);
+		map->virtual = kmap(map->page);
+	} else {
+		/*
+		 * Populate the part we're mapping;
+		 */
+
+		for (i = start_page; i < start_page + num_pages; ++i) {
+			d = drm_ttm_get_page(ttm, i);
+			if (!d)
+				return -ENOMEM;
+		}
+
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+
+		prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
+			PAGE_KERNEL :
+			drm_kernel_io_prot(man->drm_bus_maptype);
+		map->bo_kmap_type = bo_map_vmap;
+		map->virtual = vmap(ttm->pages + start_page,
+				    num_pages, 0, prot);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+/*
+ * This function is to be used for kernel mapping of buffer objects.
+ * It chooses the appropriate mapping method depending on the memory type
+ * and caching policy the buffer currently has.
+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
+ * consumes vmalloc space. Be restrictive with such mappings.
+ * Mapping single pages usually returns the logical kernel address (which is fast),
+ * but may use slower temporary mappings for high memory pages or
+ * uncached / write-combined pages.
+ *
+ * The function fills in a drm_bo_kmap_obj which can be used to return the
+ * kernel virtual address of the buffer.
+ *
+ * Code servicing a non-privileged user request is only allowed to map one
+ * page at a time. We might need to implement a better scheme to stop such
+ * processes from consuming all vmalloc space.
+ */
+
+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+		unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+	int ret;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+
+	map->virtual = NULL;
+
+	if (num_pages > bo->num_pages)
+		return -EINVAL;
+	if (start_page > bo->num_pages)
+		return -EINVAL;
+#if 0
+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+#endif
+	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+				&bus_offset, &bus_size);
+
+	if (ret)
+		return ret;
+
+	if (bus_size == 0) {
+		return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
+	} else {
+		bus_offset += start_page << PAGE_SHIFT;
+		bus_size = num_pages << PAGE_SHIFT;
+		return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+	}
+}
+EXPORT_SYMBOL(drm_bo_kmap);
+
+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
+{
+	if (!map->virtual)
+		return;
+
+	switch(map->bo_kmap_type) {
+	case bo_map_iomap:
+		iounmap(map->virtual);
+		break;
+	case bo_map_vmap:
+		vunmap(map->virtual);
+		break;
+	case bo_map_kmap:
+		kunmap(map->page);
+		break;
+	case bo_map_premapped:
+		break;
+	default:
+		BUG();
+	}
+	map->virtual = NULL;
+	map->page = NULL;
+}
+EXPORT_SYMBOL(drm_bo_kunmap);
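As the comment above explains, drm_bo_kmap() transparently picks kmap, vmap or ioremap depending on where the buffer currently lives. A usage sketch (hypothetical caller, not part of this change):

	/* Hypothetical: map the first page of a buffer object, clear it,
	 * and release the mapping. drm_bo_kunmap() undoes whichever of
	 * kmap/vmap/ioremap drm_bo_kmap() chose. */
	static int example_clear_first_page(struct drm_buffer_object *bo)
	{
		struct drm_bo_kmap_obj kmap;
		int ret;

		ret = drm_bo_kmap(bo, 0, 1, &kmap);
		if (ret)
			return ret;

		memset(kmap.virtual, 0, PAGE_SIZE);
		drm_bo_kunmap(&kmap);
		return 0;
	}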

@@ -200,7 +200,10 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
 extern pgprot_t vm_get_page_prot(unsigned long vm_flags);

 #ifndef GFP_DMA32
-#define GFP_DMA32 0
+#define GFP_DMA32 GFP_KERNEL
+#endif
+#ifndef __GFP_DMA32
+#define __GFP_DMA32 GFP_KERNEL
 #endif

 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
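The compat block above gives both GFP_DMA32 and __GFP_DMA32 a usable fallback on kernels that predate the DMA32 zone, so an allocation mask built from them is still valid. A hypothetical call site (not from this change):

	/* Hypothetical page allocation: where no DMA32 zone exists, the
	 * fallback defines make this resolve to a plain zeroed
	 * GFP_KERNEL allocation instead of an empty flag mask. */
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);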

@@ -142,7 +142,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH),

 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
@@ -152,7 +151,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),

 	DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
@@ -332,6 +330,11 @@ int drm_init(struct drm_driver *driver,
 			while ((pdev =
 				pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
 					       pid->subdevice, pdev))) {
+				/* Are there device class requirements? */
+				if ((pid->class != 0)
+				    && ((pdev->class & pid->class_mask) != pid->class)) {
+					continue;
+				}
 				/* is there already a driver loaded, or (short circuit saves work) */
 				/* does something like VesaFB have control of the memory region? */
 				if (pci_dev_driver(pdev)
@@ -358,6 +361,11 @@ int drm_init(struct drm_driver *driver,
 					pci_get_subsys(pid->vendor, pid->device,
 						       pid->subvendor, pid->subdevice,
 						       pdev))) {
+				/* Are there device class requirements? */
+				if ((pid->class != 0)
+				    && ((pdev->class & pid->class_mask) != pid->class)) {
+					continue;
+				}
 				/* stealth mode requires a manual probe */
 				pci_dev_get(pdev);
 				if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {

@@ -34,14 +34,14 @@
  * Typically called by the IRQ handler.
  */

-void drm_fence_handler(struct drm_device * dev, uint32_t class,
-		       uint32_t sequence, uint32_t type)
+void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
+		       uint32_t sequence, uint32_t type, uint32_t error)
 {
 	int wake = 0;
 	uint32_t diff;
 	uint32_t relevant;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	struct list_head *head;
 	struct drm_fence_object *fence, *next;
@@ -49,6 +49,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
 	int is_exe = (type & DRM_FENCE_TYPE_EXE);
 	int ge_last_exe;

+
 	diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;

 	if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
@@ -57,9 +58,6 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
 	diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
 	ge_last_exe = diff < driver->wrap_diff;

-	if (ge_last_exe)
-		fc->pending_flush &= ~type;
-
 	if (is_exe && ge_last_exe) {
 		fc->last_exe_flush = sequence;
 	}
@@ -75,36 +73,68 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
 		}
 	}

+	fc->pending_flush &= ~type;
 	head = (found) ? &fence->ring : &fc->ring;

 	list_for_each_entry_safe_reverse(fence, next, head, ring) {
 		if (&fence->ring == &fc->ring)
 			break;

+		if (error) {
+			fence->error = error;
+			fence->signaled = fence->type;
+			fence->submitted_flush = fence->type;
+			fence->flush_mask = fence->type;
+			list_del_init(&fence->ring);
+			wake = 1;
+			break;
+		}
+
+		if (is_exe)
 			type |= fence->native_type;

 		relevant = type & fence->type;

 		if ((fence->signaled | relevant) != fence->signaled) {
 			fence->signaled |= relevant;
+			fence->flush_mask |= relevant;
+			fence->submitted_flush |= relevant;
 			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
 				  fence->base.hash.key, fence->signaled);
-			fence->submitted_flush |= relevant;
 			wake = 1;
 		}

 		relevant = fence->flush_mask &
-			~(fence->signaled | fence->submitted_flush);
+			~(fence->submitted_flush | fence->signaled);

-		if (relevant) {
 		fc->pending_flush |= relevant;
-			fence->submitted_flush = fence->flush_mask;
-		}
+		fence->submitted_flush |= relevant;

 		if (!(fence->type & ~fence->signaled)) {
 			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
 				  fence->base.hash.key);
 			list_del_init(&fence->ring);
 		}

+	}
+
+	/*
+	 * Reinstate lost flush flags.
+	 */
+
+	if ((fc->pending_flush & type) != type) {
+		head = head->prev;
+		list_for_each_entry(fence, head, ring) {
+			if (&fence->ring == &fc->ring)
+				break;
+			diff = (fc->last_exe_flush - fence->sequence) &
+				driver->sequence_mask;
+			if (diff > driver->wrap_diff)
+				break;
+
+			relevant = fence->submitted_flush & ~fence->signaled;
+			fc->pending_flush |= relevant;
+		}
 	}

 	if (wake) {
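drm_fence_handler() now takes an error argument; a non-zero value force-signals the fence being retired and records the code in fence->error for user space to inspect. A hedged sketch of a driver-side call (names and locking context invented):

	/* Hypothetical driver breadcrumb handler: fm->lock is assumed held
	 * by the caller, as in the existing driver fence paths. A non-zero
	 * 'error' poisons the retiring fence via fence->error. */
	static void example_report_breadcrumb(struct drm_device *dev,
					      uint32_t sequence, uint32_t error)
	{
		drm_fence_handler(dev, 0 /* fence_class */, sequence,
				  DRM_FENCE_TYPE_EXE, error);
	}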
@@ -141,6 +171,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
 		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_locked);

 void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 {
@@ -160,6 +191,7 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);

 struct drm_fence_object
 *drm_fence_reference_locked(struct drm_fence_object *src)
@@ -178,7 +210,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 	atomic_inc(&src->usage);
 	mutex_unlock(&src->dev->struct_mutex);
 }
+EXPORT_SYMBOL(drm_fence_reference_unlocked);

 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
 {
@@ -198,7 +230,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
 	struct drm_fence_driver *driver = dev->driver->fence_driver;

 	if (poke_flush)
-		driver->poke_flush(dev, fence->class);
+		driver->poke_flush(dev, fence->fence_class);
 	read_lock_irqsave(&fm->lock, flags);
 	signaled =
 	    (fence->type & mask & fence->signaled) == (fence->type & mask);
@@ -206,6 +238,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,

 	return signaled;
 }
+EXPORT_SYMBOL(drm_fence_object_signaled);

 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
 				struct drm_fence_driver * driver, uint32_t sequence)
@@ -229,7 +262,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[fence->class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	unsigned long flags;

@@ -241,7 +274,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,

 	write_lock_irqsave(&fm->lock, flags);
 	fence->flush_mask |= type;
-	if (fence->submitted_flush == fence->signaled) {
+	if ((fence->submitted_flush & fence->signaled)
+	    == fence->submitted_flush) {
 		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
 		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
 			drm_fence_flush_exe(fc, driver, fence->sequence);
@@ -253,7 +287,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 		}
 	}
 	write_unlock_irqrestore(&fm->lock, flags);
-	driver->poke_flush(dev, fence->class);
+	driver->poke_flush(dev, fence->fence_class);
 	return 0;
 }

@@ -262,10 +296,10 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
  * wrapped around and reused.
  */

-void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence)
+void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
 {
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	uint32_t old_sequence;
 	unsigned long flags;
@@ -308,7 +342,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[fence->class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
 	int signaled;
 	unsigned long _end = jiffies + 3*DRM_HZ;
 	int ret = 0;
@@ -329,7 +363,15 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
 		if (ret == -EBUSY) {
 			DRM_ERROR("Fence timeout. "
 				  "GPU lockup or fence driver was "
-				  "taken down.\n");
+				  "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
+				  fence->fence_class,
+				  fence->sequence,
+				  fence->type,
+				  mask,
+				  fence->signaled);
+			DRM_ERROR("Pending exe flush %d 0x%08x\n",
+				  fc->pending_exe_flush,
+				  fc->exe_flush_sequence);
 		}
 		return ((ret == -EINTR) ? -EAGAIN : ret);
 	}
@@ -348,6 +390,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 	if (mask & ~fence->type) {
 		DRM_ERROR("Wait trying to extend fence type"
 			  " 0x%08x 0x%08x\n", mask, fence->type);
+		BUG();
 		return -EINVAL;
 	}

@@ -366,7 +409,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,

 	} else {

-		if (driver->has_irq(dev, fence->class,
+		if (driver->has_irq(dev, fence->fence_class,
 				    DRM_FENCE_TYPE_EXE)) {
 			ret = drm_fence_lazy_wait(fence, ignore_signals,
 						  DRM_FENCE_TYPE_EXE);
@@ -374,7 +417,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 				return ret;
 		}

-		if (driver->has_irq(dev, fence->class,
+		if (driver->has_irq(dev, fence->fence_class,
 				    mask & ~DRM_FENCE_TYPE_EXE)) {
 			ret = drm_fence_lazy_wait(fence, ignore_signals,
 						  mask);
@@ -402,26 +445,28 @@ int drm_fence_object_wait(struct drm_fence_object * fence,

 	return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_wait);
+

 int drm_fence_object_emit(struct drm_fence_object * fence,
-			  uint32_t fence_flags, uint32_t class, uint32_t type)
+			  uint32_t fence_flags, uint32_t fence_class, uint32_t type)
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
-	struct drm_fence_class_manager *fc = &fm->class[fence->class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
 	unsigned long flags;
 	uint32_t sequence;
 	uint32_t native_type;
 	int ret;

 	drm_fence_unring(dev, &fence->ring);
-	ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
+	ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
 	if (ret)
 		return ret;

 	write_lock_irqsave(&fm->lock, flags);
-	fence->class = class;
+	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->flush_mask = 0x00;
 	fence->submitted_flush = 0x00;
@@ -434,8 +479,9 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
 	write_unlock_irqrestore(&fm->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_emit);

-static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
+static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
 				 uint32_t type,
 				 uint32_t fence_flags,
 				 struct drm_fence_object * fence)
@@ -456,7 +502,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
 	 */

 	INIT_LIST_HEAD(&fence->base.list);
-	fence->class = class;
+	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->flush_mask = 0;
 	fence->submitted_flush = 0;
@@ -466,7 +512,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
 	write_unlock_irqrestore(&fm->lock, flags);
 	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
 		ret = drm_fence_object_emit(fence, fence_flags,
-					    fence->class, type);
+					    fence->fence_class, type);
 	}
 	return ret;
 }
@@ -491,7 +537,7 @@ out:
 }
 EXPORT_SYMBOL(drm_fence_add_user_object);

-int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
+int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
 			    unsigned flags, struct drm_fence_object ** c_fence)
 {
 	struct drm_fence_object *fence;
@@ -501,7 +547,7 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
 	fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
 	if (!fence)
 		return -ENOMEM;
-	ret = drm_fence_object_init(dev, class, type, flags, fence);
+	ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
 	if (ret) {
 		drm_fence_usage_deref_unlocked(&fence);
 		return ret;
@@ -517,7 +563,7 @@ EXPORT_SYMBOL(drm_fence_object_create);
 void drm_fence_manager_init(struct drm_device * dev)
 {
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *class;
+	struct drm_fence_class_manager *fence_class;
 	struct drm_fence_driver *fed = dev->driver->fence_driver;
 	int i;
 	unsigned long flags;
@@ -533,11 +579,11 @@ void drm_fence_manager_init(struct drm_device * dev)
 	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);

 	for (i=0; i<fm->num_classes; ++i) {
-		class = &fm->class[i];
+		fence_class = &fm->fence_class[i];

-		INIT_LIST_HEAD(&class->ring);
-		class->pending_flush = 0;
-		DRM_INIT_WAITQUEUE(&class->fence_queue);
+		INIT_LIST_HEAD(&fence_class->ring);
+		fence_class->pending_flush = 0;
+		DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
 	}

 	atomic_set(&fm->count, 0);
@@ -545,6 +591,24 @@ void drm_fence_manager_init(struct drm_device * dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }

+void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
+{
+	struct drm_device *dev = fence->dev;
+	struct drm_fence_manager *fm = &dev->fm;
+	unsigned long irq_flags;
+
+	read_lock_irqsave(&fm->lock, irq_flags);
+	arg->handle = fence->base.hash.key;
+	arg->fence_class = fence->fence_class;
+	arg->type = fence->type;
+	arg->signaled = fence->signaled;
+	arg->error = fence->error;
+	arg->sequence = fence->sequence;
+	read_unlock_irqrestore(&fm->lock, irq_flags);
+}
+EXPORT_SYMBOL(drm_fence_fill_arg);
+
+
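drm_fence_fill_arg() takes the fence-manager read lock once and snapshots handle, class, type, signaled state, error and sequence together; the ioctl paths below swap their open-coded lock/copy sequences for this helper. A hypothetical non-ioctl caller (not part of this change):

	/* Hypothetical: wait on a fence, then report its final state,
	 * including any error propagated by drm_fence_handler(). */
	static int example_wait_and_report(struct drm_fence_object *fence,
					   struct drm_fence_arg *arg)
	{
		int ret = drm_fence_object_wait(fence, 1 /* lazy */,
						0 /* honor signals */,
						fence->type);

		drm_fence_fill_arg(fence, arg);	/* locked snapshot */
		return ret;
	}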
 void drm_fence_manager_takedown(struct drm_device * dev)
 {
 }
@@ -572,7 +636,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -582,7 +645,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)

 	if (arg->flags & DRM_FENCE_FLAG_EMIT)
 		LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_object_create(dev, arg->class,
+	ret = drm_fence_object_create(dev, arg->fence_class,
 				      arg->type, arg->flags, &fence);
 	if (ret)
 		return ret;
@@ -600,41 +663,13 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)

 	arg->handle = fence->base.hash.key;

-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;
 }

-int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_user_object *uo;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(file_priv, arg->handle);
-	if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	ret = drm_remove_user_object(file_priv, uo);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
 int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	int ret;
@@ -642,7 +677,6 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
 	struct drm_user_object *uo;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -654,12 +688,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	if (ret)
 		return ret;
 	fence = drm_lookup_fence_object(file_priv, arg->handle);
-
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;
@@ -687,7 +716,6 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -699,11 +727,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	if (!fence)
 		return -EINVAL;

-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;
@@ -715,7 +739,6 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -728,11 +751,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;
 	ret = drm_fence_object_flush(fence, arg->type);

-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;
@@ -745,7 +764,6 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -760,11 +778,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 				  arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
 				  0, arg->type);

-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;
@@ -777,7 +791,6 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -789,14 +802,10 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	fence = drm_lookup_fence_object(file_priv, arg->handle);
 	if (!fence)
 		return -EINVAL;
-	ret = drm_fence_object_emit(fence, arg->flags, arg->class,
+	ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
 				    arg->type);

-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;
@@ -808,7 +817,6 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;

 	if (!fm->initialized) {
@@ -821,23 +829,22 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;
 	}
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags,
+	ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
 				       NULL, &fence);
 	if (ret)
 		return ret;

+	if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
 		ret = drm_fence_add_user_object(file_priv, fence,
 						arg->flags &
 						DRM_FENCE_FLAG_SHAREABLE);
 		if (ret)
 			return ret;
+	}

 	arg->handle = fence->base.hash.key;

-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);

 	return ret;

@@ -263,7 +263,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	priv->lock_count = 0;

 	INIT_LIST_HEAD(&priv->lhead);
-	INIT_LIST_HEAD(&priv->user_objects);
 	INIT_LIST_HEAD(&priv->refd_objects);
 	INIT_LIST_HEAD(&priv->fbs);

@@ -339,7 +338,6 @@ static void drm_object_release(struct file *filp) {

 	struct drm_file *priv = filp->private_data;
 	struct list_head *head;
-	struct drm_user_object *user_object;
 	struct drm_ref_object *ref_object;
 	int i;

@@ -358,17 +356,6 @@ static void drm_object_release(struct file *filp) {
 		head = &priv->refd_objects;
 	}

-	/*
-	 * Free leftover user objects created by me.
-	 */
-
-	head = &priv->user_objects;
-	while (head->next != head) {
-		user_object = list_entry(head->next, struct drm_user_object, list);
-		drm_remove_user_object(priv, user_object);
-		head = &priv->user_objects;
-	}
-
 	for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
 		drm_ht_remove(&priv->refd_object_hash[i]);
 	}

@@ -38,7 +38,8 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,

 	DRM_ASSERT_LOCKED(&dev->struct_mutex);

-	atomic_set(&item->refcount, 1);
+	/* The refcount will be bumped to 1 when we add the ref object below. */
+	atomic_set(&item->refcount, 0);
 	item->shareable = shareable;
 	item->owner = priv;

@@ -47,9 +48,13 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
 	if (ret)
 		return ret;

-	list_add_tail(&item->list, &priv->user_objects);
-	return 0;
+	ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
+	if (ret)
+		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+
+	return ret;
 }
+EXPORT_SYMBOL(drm_add_user_object);

 struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
 {
@@ -76,6 +81,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
 	}
 	return item;
 }
+EXPORT_SYMBOL(drm_lookup_user_object);

 static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
 {
@@ -85,26 +91,10 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
 	if (atomic_dec_and_test(&item->refcount)) {
 		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
 		BUG_ON(ret);
-		list_del_init(&item->list);
 		item->remove(priv, item);
 	}
 }

-int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item)
-{
-	DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
-
-	if (item->owner != priv) {
-		DRM_ERROR("Cannot destroy object not owned by you.\n");
-		return -EINVAL;
-	}
-	item->owner = 0;
-	item->shareable = 0;
-	list_del_init(&item->list);
-	drm_deref_user_object(priv, item);
-	return 0;
-}
-
 static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
 				 enum drm_ref_type action)
 {
@@ -196,6 +186,7 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,

 	return drm_hash_entry(hash, struct drm_ref_object, hash);
 }
+EXPORT_SYMBOL(drm_lookup_ref_object);

 static void drm_remove_other_references(struct drm_file * priv,
 					struct drm_user_object * ro)

@@ -32,6 +32,7 @@
 #define _DRM_OBJECTS_H

 struct drm_device;
+struct drm_bo_mem_reg;

 /***************************************************
  * User space objects. (drm_object.c)
@@ -42,10 +43,14 @@ struct drm_device;
 enum drm_object_type {
 	drm_fence_type,
 	drm_buffer_type,
-	drm_ttm_type
 	/*
 	 * Add other user space object types here.
 	 */
+	drm_driver_type0 = 256,
+	drm_driver_type1,
+	drm_driver_type2,
+	drm_driver_type3,
+	drm_driver_type4
 };
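The drm_driver_type* values reserve an ID range (from 256 up) so drivers can define their own user-object kinds without colliding with the core types. A hypothetical driver-private object built on that range (names invented for illustration):

	/* Hypothetical driver-private user object; core lookup code can
	 * still classify the handle by matching base.type. */
	struct example_priv_object {
		struct drm_user_object base;	/* base.type = drm_driver_type0 */
		uint32_t priv_state;
	};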

 /*
@@ -97,15 +102,6 @@ extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
 extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv,
 						      uint32_t key);

-/*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_user_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */
-
-extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item);
-
 /*
  * Must be called with the struct_mutex held. May temporarily release it.
  */
@@ -149,13 +145,14 @@ struct drm_fence_object {
 	 */

 	struct list_head ring;
-	int class;
+	int fence_class;
 	uint32_t native_type;
 	uint32_t type;
 	uint32_t signaled;
 	uint32_t sequence;
 	uint32_t flush_mask;
 	uint32_t submitted_flush;
+	uint32_t error;
 };

 #define _DRM_FENCE_CLASSES 8
@@ -173,7 +170,7 @@ struct drm_fence_class_manager {
 struct drm_fence_manager {
 	int initialized;
 	rwlock_t lock;
-	struct drm_fence_class_manager class[_DRM_FENCE_CLASSES];
+	struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
 	uint32_t num_classes;
 	atomic_t count;
 };
@@ -184,18 +181,18 @@ struct drm_fence_driver {
 	uint32_t flush_diff;
 	uint32_t sequence_mask;
 	int lazy_capable;
-	int (*has_irq) (struct drm_device * dev, uint32_t class,
+	int (*has_irq) (struct drm_device * dev, uint32_t fence_class,
 			uint32_t flags);
-	int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
+	int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags,
 		     uint32_t * breadcrumb, uint32_t * native_type);
-	void (*poke_flush) (struct drm_device * dev, uint32_t class);
+	void (*poke_flush) (struct drm_device * dev, uint32_t fence_class);
 };

-extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
-			      uint32_t sequence, uint32_t type);
+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
+			      uint32_t sequence, uint32_t type, uint32_t error);
 extern void drm_fence_manager_init(struct drm_device *dev);
 extern void drm_fence_manager_takedown(struct drm_device *dev);
-extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
 				uint32_t sequence);
 extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type);
 extern int drm_fence_object_signaled(struct drm_fence_object * fence,
@@ -208,8 +205,14 @@ extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 extern int drm_fence_object_wait(struct drm_fence_object * fence,
 				 int lazy, int ignore_signals, uint32_t mask);
 extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
-				   uint32_t fence_flags, uint32_t class,
+				   uint32_t fence_flags, uint32_t fence_class,
 				   struct drm_fence_object ** c_fence);
+extern int drm_fence_object_emit(struct drm_fence_object * fence,
+				 uint32_t fence_flags, uint32_t class,
+				 uint32_t type);
+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
+			       struct drm_fence_arg *arg);
+
 extern int drm_fence_add_user_object(struct drm_file * priv,
 				     struct drm_fence_object * fence, int shareable);

@@ -258,23 +261,22 @@ struct drm_ttm_backend_func {
 			     unsigned long num_pages, struct page ** pages);
 	void (*clear) (struct drm_ttm_backend * backend);
 	int (*bind) (struct drm_ttm_backend * backend,
-		     unsigned long offset, int cached);
+		     struct drm_bo_mem_reg * bo_mem);
 	int (*unbind) (struct drm_ttm_backend * backend);
 	void (*destroy) (struct drm_ttm_backend * backend);
 };


-struct drm_ttm_backend {
+typedef struct drm_ttm_backend {
+	struct drm_device *dev;
 	uint32_t flags;
-	int mem_type;
 	struct drm_ttm_backend_func *func;
-};
+} drm_ttm_backend_t;
|
|
||||||
struct drm_ttm {
|
struct drm_ttm {
|
||||||
struct page **pages;
|
struct page **pages;
|
||||||
uint32_t page_flags;
|
uint32_t page_flags;
|
||||||
unsigned long num_pages;
|
unsigned long num_pages;
|
||||||
unsigned long aper_offset;
|
|
||||||
atomic_t vma_count;
|
atomic_t vma_count;
|
||||||
struct drm_device *dev;
|
struct drm_device *dev;
|
||||||
int destroy;
|
int destroy;
|
||||||
|
@ -290,11 +292,13 @@ struct drm_ttm {
|
||||||
};
|
};
|
||||||
|
|
||||||
extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
|
extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
|
||||||
extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset);
|
extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
|
||||||
extern void drm_ttm_unbind(struct drm_ttm * ttm);
|
extern void drm_ttm_unbind(struct drm_ttm * ttm);
|
||||||
extern void drm_ttm_evict(struct drm_ttm * ttm);
|
extern void drm_ttm_evict(struct drm_ttm * ttm);
|
||||||
extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
|
extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
|
||||||
extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
|
extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
|
||||||
|
extern void drm_ttm_cache_flush(void);
|
||||||
|
extern int drm_ttm_populate(struct drm_ttm * ttm);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
|
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
|
||||||
|
@ -333,6 +337,14 @@ struct drm_bo_mem_reg {
|
||||||
uint32_t mem_type;
|
uint32_t mem_type;
|
||||||
uint64_t flags;
|
uint64_t flags;
|
||||||
uint64_t mask;
|
uint64_t mask;
|
||||||
|
uint32_t desired_tile_stride;
|
||||||
|
uint32_t hw_tile_stride;
|
||||||
|
};
|
||||||
|
|
||||||
|
enum drm_bo_type {
|
||||||
|
drm_bo_type_dc,
|
||||||
|
drm_bo_type_user,
|
||||||
|
drm_bo_type_kernel, /* for initial kernel allocations */
|
||||||
};
|
};
|
||||||
|
|
||||||
struct drm_buffer_object {
|
struct drm_buffer_object {
|
||||||
|
@ -356,10 +368,13 @@ struct drm_buffer_object {
|
||||||
|
|
||||||
uint32_t fence_type;
|
uint32_t fence_type;
|
||||||
uint32_t fence_class;
|
uint32_t fence_class;
|
||||||
|
uint32_t new_fence_type;
|
||||||
|
uint32_t new_fence_class;
|
||||||
struct drm_fence_object *fence;
|
struct drm_fence_object *fence;
|
||||||
uint32_t priv_flags;
|
uint32_t priv_flags;
|
||||||
wait_queue_head_t event_queue;
|
wait_queue_head_t event_queue;
|
||||||
struct mutex mutex;
|
struct mutex mutex;
|
||||||
|
unsigned long num_pages;
|
||||||
|
|
||||||
/* For pinned buffers */
|
/* For pinned buffers */
|
||||||
int pinned;
|
int pinned;
|
||||||
|
@ -368,7 +383,6 @@ struct drm_buffer_object {
|
||||||
struct list_head pinned_lru;
|
struct list_head pinned_lru;
|
||||||
|
|
||||||
/* For vm */
|
/* For vm */
|
||||||
|
|
||||||
struct drm_ttm *ttm;
|
struct drm_ttm *ttm;
|
||||||
struct drm_map_list map_list;
|
struct drm_map_list map_list;
|
||||||
uint32_t memory_type;
|
uint32_t memory_type;
|
||||||
|
@ -395,6 +409,7 @@ struct drm_mem_type_manager {
|
||||||
struct list_head pinned;
|
struct list_head pinned;
|
||||||
uint32_t flags;
|
uint32_t flags;
|
||||||
uint32_t drm_bus_maptype;
|
uint32_t drm_bus_maptype;
|
||||||
|
unsigned long gpu_offset;
|
||||||
unsigned long io_offset;
|
unsigned long io_offset;
|
||||||
unsigned long io_size;
|
unsigned long io_size;
|
||||||
void *io_addr;
|
void *io_addr;
|
||||||
|
@ -434,7 +449,8 @@ struct drm_bo_driver {
|
||||||
uint32_t num_mem_busy_prio;
|
uint32_t num_mem_busy_prio;
|
||||||
struct drm_ttm_backend *(*create_ttm_backend_entry)
|
struct drm_ttm_backend *(*create_ttm_backend_entry)
|
||||||
(struct drm_device * dev);
|
(struct drm_device * dev);
|
||||||
int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
|
int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
|
||||||
|
uint32_t * type);
|
||||||
int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
|
int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
|
||||||
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
|
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
|
||||||
struct drm_mem_type_manager * man);
|
struct drm_mem_type_manager * man);
|
||||||
|
@ -451,6 +467,7 @@ extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_f
|
||||||
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||||
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||||
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||||
|
extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
|
||||||
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||||
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||||
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||||
|
@ -471,34 +488,44 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
|
||||||
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
|
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
|
||||||
|
|
||||||
extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
|
extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
|
||||||
extern int drm_fence_buffer_objects(struct drm_file * priv,
|
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
|
||||||
|
extern void drm_putback_buffer_objects(struct drm_device *dev);
|
||||||
|
extern int drm_fence_buffer_objects(struct drm_device * dev,
|
||||||
struct list_head *list,
|
struct list_head *list,
|
||||||
uint32_t fence_flags,
|
uint32_t fence_flags,
|
||||||
struct drm_fence_object * fence,
|
struct drm_fence_object * fence,
|
||||||
struct drm_fence_object ** used_fence);
|
struct drm_fence_object ** used_fence);
|
||||||
extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
|
extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
|
||||||
|
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
|
||||||
|
enum drm_bo_type type, uint64_t mask,
|
||||||
|
uint32_t hint, uint32_t page_alignment,
|
||||||
|
unsigned long buffer_start,
|
||||||
|
struct drm_buffer_object **bo);
|
||||||
extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
|
extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
|
||||||
int no_wait);
|
int no_wait);
|
||||||
extern int drm_bo_mem_space(struct drm_buffer_object * bo,
|
extern int drm_bo_mem_space(struct drm_buffer_object * bo,
|
||||||
struct drm_bo_mem_reg * mem, int no_wait);
|
struct drm_bo_mem_reg * mem, int no_wait);
|
||||||
extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
|
extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
|
||||||
int no_wait, int move_unfenced);
|
int no_wait, int move_unfenced);
|
||||||
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
|
extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type);
|
||||||
enum drm_bo_type type, uint64_t mask,
|
|
||||||
uint32_t hint, uint32_t page_alignment,
|
|
||||||
unsigned long buffer_start,
|
|
||||||
struct drm_buffer_object **bo);
|
|
||||||
extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
|
extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
|
||||||
unsigned long p_offset, unsigned long p_size);
|
unsigned long p_offset, unsigned long p_size);
|
||||||
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
|
extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
|
||||||
extern int drm_bo_add_user_object(struct drm_file *file_priv,
|
uint32_t fence_class, uint64_t flags,
|
||||||
struct drm_buffer_object *bo, int sharable);
|
uint64_t mask, uint32_t hint,
|
||||||
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
|
struct drm_bo_info_rep * rep,
|
||||||
extern int drm_bo_set_pin(struct drm_device *dev,
|
struct drm_buffer_object **bo_rep);
|
||||||
struct drm_buffer_object *bo, int pin);
|
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
|
||||||
|
uint32_t handle,
|
||||||
|
int check_owner);
|
||||||
|
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
|
||||||
|
uint64_t flags, uint64_t mask, uint32_t hint,
|
||||||
|
uint32_t fence_class,
|
||||||
|
int no_wait,
|
||||||
|
struct drm_bo_info_rep *rep);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Buffer object memory move helpers.
|
* Buffer object memory move- and map helpers.
|
||||||
* drm_bo_move.c
|
* drm_bo_move.c
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -514,11 +541,69 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
|
||||||
uint32_t fence_type,
|
uint32_t fence_type,
|
||||||
uint32_t fence_flags,
|
uint32_t fence_flags,
|
||||||
struct drm_bo_mem_reg * new_mem);
|
struct drm_bo_mem_reg * new_mem);
|
||||||
|
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
|
||||||
|
extern unsigned long drm_bo_offset_end(unsigned long offset,
|
||||||
|
unsigned long end);
|
||||||
|
|
||||||
extern int drm_mem_reg_ioremap(struct drm_device *dev,
|
struct drm_bo_kmap_obj {
|
||||||
struct drm_bo_mem_reg *mem, void **virtual);
|
void *virtual;
|
||||||
extern void drm_mem_reg_iounmap(struct drm_device *dev,
|
struct page *page;
|
||||||
struct drm_bo_mem_reg *mem, void *virtual);
|
enum {
|
||||||
|
bo_map_iomap,
|
||||||
|
bo_map_vmap,
|
||||||
|
bo_map_kmap,
|
||||||
|
bo_map_premapped,
|
||||||
|
} bo_kmap_type;
|
||||||
|
};
|
||||||
|
|
||||||
|
static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
|
||||||
|
{
|
||||||
|
*is_iomem = (map->bo_kmap_type == bo_map_iomap ||
|
||||||
|
map->bo_kmap_type == bo_map_premapped);
|
||||||
|
return map->virtual;
|
||||||
|
}
|
||||||
|
extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
|
||||||
|
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
|
||||||
|
unsigned long num_pages, struct drm_bo_kmap_obj *map);
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* drm_regman.c
|
||||||
|
*/
|
||||||
|
|
||||||
|
struct drm_reg {
|
||||||
|
struct list_head head;
|
||||||
|
struct drm_fence_object *fence;
|
||||||
|
uint32_t fence_type;
|
||||||
|
uint32_t new_fence_type;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct drm_reg_manager {
|
||||||
|
struct list_head free;
|
||||||
|
struct list_head lru;
|
||||||
|
struct list_head unfenced;
|
||||||
|
|
||||||
|
int (*reg_reusable)(const struct drm_reg *reg, const void *data);
|
||||||
|
void (*reg_destroy)(struct drm_reg *reg);
|
||||||
|
};
|
||||||
|
|
||||||
|
extern int drm_regs_alloc(struct drm_reg_manager *manager,
|
||||||
|
const void *data,
|
||||||
|
uint32_t fence_class,
|
||||||
|
uint32_t fence_type,
|
||||||
|
int interruptible,
|
||||||
|
int no_wait,
|
||||||
|
struct drm_reg **reg);
|
||||||
|
|
||||||
|
extern void drm_regs_fence(struct drm_reg_manager *regs,
|
||||||
|
struct drm_fence_object *fence);
|
||||||
|
|
||||||
|
extern void drm_regs_free(struct drm_reg_manager *manager);
|
||||||
|
extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
|
||||||
|
extern void drm_regs_init(struct drm_reg_manager *manager,
|
||||||
|
int (*reg_reusable)(const struct drm_reg *,
|
||||||
|
const void *),
|
||||||
|
void (*reg_destroy)(struct drm_reg *));
|
||||||
|
|
||||||
extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
|
extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
|
||||||
void **virtual);
|
void **virtual);
|
||||||
|
@ -531,5 +616,4 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *
|
||||||
#else
|
#else
|
||||||
#define DRM_ASSERT_LOCKED(_mutex)
|
#define DRM_ASSERT_LOCKED(_mutex)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
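The drm_bo_kmap_obj interface added above replaces ad-hoc whole-region ioremaps with short-lived per-page kernel mappings, and drm_bmo_virtual() tells the caller whether the mapping points at I/O memory. A minimal sketch of a kernel-side caller, assuming a valid buffer object is already at hand; the function name and the written value are illustrative, not part of the patch:

/* Sketch: map the first page of a buffer object, write one dword,
 * unmap. Assumes drm_objects.h context and <asm/io.h> for writel(). */
static int example_poke_bo(struct drm_buffer_object *bo, u32 value)
{
	struct drm_bo_kmap_obj kmap;
	int is_iomem;
	u32 *virt;
	int ret;

	ret = drm_bo_kmap(bo, 0 /* start page */, 1 /* num pages */, &kmap);
	if (ret)
		return ret;

	virt = drm_bmo_virtual(&kmap, &is_iomem);
	if (is_iomem)
		writel(value, (void __iomem *) virt);	/* iomap/premapped */
	else
		virt[0] = value;			/* vmap/kmap */

	drm_bo_kunmap(&kmap);
	return 0;
}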
@ -35,11 +35,12 @@ static void drm_ttm_ipi_handler(void *null)
 	flush_agp_cache();
 }
 
-static void drm_ttm_cache_flush(void)
+void drm_ttm_cache_flush(void)
 {
 	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
 		DRM_ERROR("Timed out waiting for drm cache flush.\n");
 }
+EXPORT_SYMBOL(drm_ttm_cache_flush);
 
 /*
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
@ -207,7 +208,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 	return p;
 }
 
-static int drm_ttm_populate(struct drm_ttm * ttm)
+int drm_ttm_populate(struct drm_ttm * ttm)
 {
 	struct page *page;
 	unsigned long i;
@ -308,7 +309,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm)
 	drm_ttm_fixup_caching(ttm);
 }
 
-int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
+int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
 {
 
 	int ret = 0;
@ -325,17 +326,16 @@ int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
 	if (ret)
 		return ret;
 
-	if (ttm->state == ttm_unbound && !cached) {
+	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
 		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
 	}
 
-	if ((ret = be->func->bind(be, aper_offset, cached))) {
+	if ((ret = be->func->bind(be, bo_mem))) {
 		ttm->state = ttm_evicted;
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}
 
-	ttm->aper_offset = aper_offset;
 	ttm->state = ttm_bound;
 
 	return 0;
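drm_bind_ttm() and the backend bind hook now take the whole struct drm_bo_mem_reg, so the aperture offset comes from bo_mem->mm_node->start and cachability from bo_mem->flags instead of two loose parameters. A hedged sketch of what a caller provides; the field choices are illustrative:

/* Sketch: bind a ttm through the new interface. Only the fields the
 * bind path reads need to be set; mm_node supplies the offset. */
static int example_bind(struct drm_ttm *ttm, struct drm_mm_node *node)
{
	struct drm_bo_mem_reg bo_mem;

	memset(&bo_mem, 0, sizeof(bo_mem));
	bo_mem.mm_node = node;
	bo_mem.flags = DRM_BO_FLAG_CACHED;	/* keep pages cached */

	return drm_bind_ttm(ttm, &bo_mem);
}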
@ -38,9 +38,11 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
 	return drm_agp_init_ttm(dev);
}
 
-int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int i915_fence_types(struct drm_buffer_object *bo,
+		     uint32_t * fclass,
+		     uint32_t * type)
 {
-	if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
 		*type = 3;
 	else
 		*type = 1;
@ -71,6 +73,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_CACHED;
 		man->drm_bus_maptype = 0;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_TT:
 		if (!(drm_core_has_AGP(dev) && dev->agp)) {
@ -84,6 +87,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
 		man->drm_bus_maptype = _DRM_AGP;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_VRAM:
 		if (!(drm_core_has_AGP(dev) && dev->agp)) {
@ -97,6 +101,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
 		man->drm_bus_maptype = _DRM_AGP;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_PRIV0:	/* for OS preallocated space */
 		DRM_ERROR("PRIV0 not used yet.\n");
@ -199,7 +204,7 @@ static int i915_move_flip(struct drm_buffer_object * bo,
 	if (ret)
 		return ret;
 
-	ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
+	ret = drm_bind_ttm(bo->ttm, &tmp_mem);
 	if (ret)
 		goto out_cleanup;
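The fence_type driver hook now reports a fence class through the extra *fclass out-parameter in addition to the type mask. A sketch of the shape such a hook takes under the new signature, with a single class 0 mirroring the i915 hunk above; the function name is illustrative:

/* Sketch: a fence_type hook under the new signature. */
static int example_fence_types(struct drm_buffer_object *bo,
			       uint32_t *fclass, uint32_t *type)
{
	*fclass = 0;	/* all buffers on the one fence class */
	*type = (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) ?
		3 : 1;
	return 0;
}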
@ -42,7 +42,7 @@ static void i915_perform_flush(struct drm_device * dev)
 {
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[0];
+	struct drm_fence_class_manager *fc = &fm->fence_class[0];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	uint32_t flush_flags = 0;
 	uint32_t flush_sequence = 0;
@ -63,7 +63,8 @@ static void i915_perform_flush(struct drm_device * dev)
 
 		diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
 		if (diff < driver->wrap_diff && diff != 0) {
-			drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
+			drm_fence_handler(dev, 0, sequence,
+					  DRM_FENCE_TYPE_EXE, 0);
 		}
 
 		if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
@ -82,7 +83,7 @@ static void i915_perform_flush(struct drm_device * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
 		}
 	}
 
@ -103,7 +104,7 @@ static void i915_perform_flush(struct drm_device * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
 		}
 	}
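Every drm_fence_handler() call gains a trailing error argument; the drivers touched here all pass 0 (no error), and a nonzero value would instead mark the signalled sequence as having completed with an error that userspace can read back through drm_fence_arg.error. A hedged sketch of the error path; the -EIO choice and function name are illustrative, not from this patch:

/* Sketch: signalling a sequence with the new error argument.
 * Assumes drm kernel context and <linux/errno.h>. */
static void example_signal(struct drm_device *dev, uint32_t sequence,
			   int gpu_hung)
{
	drm_fence_handler(dev, 0 /* fence class */, sequence,
			  DRM_FENCE_TYPE_EXE,
			  gpu_hung ? (uint32_t) -EIO : 0);
}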
@ -29,7 +29,16 @@
 #include "drm_pciids.h"
 
 static struct pci_device_id pciidlist[] = {
-	nouveau_PCI_IDS
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask = 0xff << 16,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask = 0xff << 16,
+	}
 };
 
 extern struct drm_ioctl_desc nouveau_ioctls[];
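Instead of the generated nouveau_PCI_IDS table (deleted from drm_pciids.txt further below), the driver now claims any display-class device from the two NVIDIA vendor IDs. Roughly, the PCI core's match for such an entry reduces to the predicate sketched here; this is a simplification with the subsystem-ID checks omitted:

#include <linux/pci.h>

/* Sketch: the class/class_mask test the PCI core applies for the
 * wildcard entries above. A VGA controller (class 0x030000) matches
 * because only the base-class byte is compared. */
static int example_matches(const struct pci_device_id *id,
			   const struct pci_dev *dev)
{
	return (id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
	       (id->device == PCI_ANY_ID || id->device == dev->device) &&
	       ((dev->class ^ id->class) & id->class_mask) == 0;
}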
@ -80,16 +80,16 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be)
 }
 
 static int
-nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start,
-		   int cached)
+nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	uint64_t offset = (pg_start << PAGE_SHIFT);
+	uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
 	uint32_t i;
 
-	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached);
+	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
+		  offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1);
 
 	if (offset & NV_CTXDMA_PAGE_MASK)
 		return -EINVAL;
@ -188,7 +188,6 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 	nvbe->dev = dev;
 
 	nvbe->backend.func = &nouveau_sgdma_backend;
-	nvbe->backend.mem_type = DRM_BO_MEM_TT;
 
 	return &nvbe->backend;
 }
@ -278,6 +277,8 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_ttm_backend *be;
 	struct drm_scatter_gather sgreq;
+	struct drm_mm_node mm_node;
+	struct drm_bo_mem_reg mem;
 	int ret;
 
 	dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
@ -303,7 +304,10 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
 		return ret;
 	}
 
-	if ((ret = be->func->bind(be, 0, 0))) {
+	mm_node.start = 0;
+	mem.mm_node = &mm_node;
+
+	if ((ret = be->func->bind(be, &mem))) {
 		DRM_ERROR("failed bind: %d\n", ret);
 		return ret;
 	}
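For the one-off bind in the nottm hack above, the caller fabricates a minimal memory region on the stack; only the fields this backend actually reads need to be meaningful. The same pattern condensed into a hedged sketch, with zero-initialization added for safety since the original leaves the remaining fields unset:

/* Sketch: a fabricated region for an early bind outside the buffer
 * object path. Designated initializers zero everything else. */
static int example_early_bind(struct drm_ttm_backend *be)
{
	struct drm_mm_node mm_node = { .start = 0 };
	struct drm_bo_mem_reg mem = { .mm_node = &mm_node };

	return be->func->bind(be, &mem);
}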
@ -0,0 +1 @@
+../shared-core/nouveau_swmthd.c
@ -0,0 +1 @@
+../shared-core/nouveau_swmthd.h
@ -1 +0,0 @@
-../shared-core/nv30_graph.c
@ -37,7 +37,8 @@ struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
 	return drm_agp_init_ttm(dev);
 }
 
-int via_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
+		    uint32_t * type)
 {
 	*type = 3;
 	return 0;
@ -42,7 +42,7 @@
 static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	struct drm_fence_class_manager *fc = &dev->fm.class[class];
+	struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
 	uint32_t pending_flush_types = 0;
 	uint32_t signaled_flush_types = 0;
 	uint32_t status;
@ -98,7 +98,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 			drm_idlelock_release(&dev->lock);
 			dev_priv->have_idlelock = 0;
 		}
-		drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
+		drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
+				  signaled_flush_types, 0);
 	}
 }
 
@ -204,7 +205,7 @@ void via_fence_timer(unsigned long data)
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	struct drm_fence_manager *fm = &dev->fm;
 	uint32_t pending_flush;
-	struct drm_fence_class_manager *fc = &dev->fm.class[0];
+	struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
 
 	if (!dev_priv)
 		return;
@ -138,11 +138,11 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
 			xgi_emit_flush(info, FALSE);
 		}
 
-		info->cmdring.last_ptr[1] = begin[1];
-		info->cmdring.last_ptr[2] = begin[2];
-		info->cmdring.last_ptr[3] = begin[3];
+		info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
+		info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]);
+		info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]);
 		DRM_WRITEMEMORYBARRIER();
-		info->cmdring.last_ptr[0] = begin[0];
+		info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]);
 
 		triggerHWCommandList(info);
 	}
@ -258,6 +258,8 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
 	const unsigned int flush_size = sizeof(flush_command);
 	u32 *batch_addr;
 	u32 hw_addr;
+	unsigned int i;
+
 
 	/* check buf is large enough to contain a new flush batch */
 	if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
@ -269,18 +271,20 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
 	batch_addr = info->cmdring.ptr
 		+ (info->cmdring.ring_offset / 4);
 
-	(void) memcpy(batch_addr, flush_command, flush_size);
-
-	if (stop) {
-		*batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK;
+	for (i = 0; i < (flush_size / 4); i++) {
+		batch_addr[i] = cpu_to_le32(flush_command[i]);
 	}
 
-	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4);
-	info->cmdring.last_ptr[2] = hw_addr >> 4;
+	if (stop) {
+		*batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK);
+	}
+
+	info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4));
+	info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4);
 	info->cmdring.last_ptr[3] = 0;
 	DRM_WRITEMEMORYBARRIER();
-	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
-		| (BEGIN_VALID_MASK);
+	info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
+		| (BEGIN_VALID_MASK));
 
 	triggerHWCommandList(info);
 
@ -299,13 +303,13 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
  */
 void xgi_emit_nop(struct xgi_info * info)
 {
-	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK
-		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
+	info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK
+		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence));
 	info->cmdring.last_ptr[2] = 0;
 	info->cmdring.last_ptr[3] = 0;
 	DRM_WRITEMEMORYBARRIER();
-	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
-		| (BEGIN_VALID_MASK);
+	info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
+		| (BEGIN_VALID_MASK));
 
 	triggerHWCommandList(info);
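All words written into the xgi command ring are now passed through cpu_to_le32(), so the ring contents are little-endian on any host; on little-endian machines the conversion compiles to nothing. The pattern in one line, sketched with illustrative names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: an endian-safe ring-slot write. The __le32 type documents
 * that the stored value is in device (little-endian) byte order. */
static inline void ring_write_example(volatile __le32 *slot, u32 cmd)
{
	*slot = cpu_to_le32(cmd);
}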
@ -351,9 +351,9 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct xgi_info *info = dev->dev_private;
-	const u32 irq_bits = DRM_READ32(info->mmio_map,
+	const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
 					(0x2800
-					 + M2REG_AUTO_LINK_STATUS_ADDRESS))
+					 + M2REG_AUTO_LINK_STATUS_ADDRESS)))
 		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
 		   | M2REG_ACTIVE_INTERRUPT_0_MASK
 		   | M2REG_ACTIVE_INTERRUPT_2_MASK
@ -363,7 +363,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
 	if (irq_bits != 0) {
 		DRM_WRITE32(info->mmio_map,
 			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
-			    M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits);
+			    cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
 		xgi_fence_handler(dev);
 		return IRQ_HANDLED;
 	} else {
@ -35,11 +35,11 @@
 
 #define DRIVER_NAME		"xgi"
 #define DRIVER_DESC		"XGI XP5 / XP10 / XG47"
-#define DRIVER_DATE		"20070918"
+#define DRIVER_DATE		"20071003"
 
 #define DRIVER_MAJOR		1
 #define DRIVER_MINOR		1
-#define DRIVER_PATCHLEVEL	0
+#define DRIVER_PATCHLEVEL	3
 
 #include "xgi_cmdlist.h"
 #include "xgi_drm.h"
@ -33,7 +33,7 @@
 static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 {
 	struct xgi_info * info = dev->dev_private;
-	struct drm_fence_class_manager * fc = &dev->fm.class[class];
+	struct drm_fence_class_manager * fc = &dev->fm.fence_class[class];
 	uint32_t pending_flush_types = 0;
 	uint32_t signaled_flush_types = 0;
 
@ -48,8 +48,8 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 
 	if (pending_flush_types) {
 		if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
-			const u32 begin_id = DRM_READ32(info->mmio_map,
-							0x2820)
+			const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
+							0x2820))
 				& BEGIN_BEGIN_IDENTIFICATION_MASK;
 
 			if (begin_id != info->complete_sequence) {
@ -60,7 +60,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 
 		if (signaled_flush_types) {
 			drm_fence_handler(dev, 0, info->complete_sequence,
-					  signaled_flush_types);
+					  signaled_flush_types, 0);
 		}
 	}
@ -38,12 +38,12 @@ static unsigned int s_invalid_begin = 0;
 
 static bool xgi_validate_signal(struct drm_map * map)
 {
-	if (DRM_READ32(map, 0x2800) & 0x001c0000) {
+	if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) {
 		u16 check;
 
 		/* Check Read back status */
 		DRM_WRITE8(map, 0x235c, 0x80);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 
 		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
 			return FALSE;
@ -51,28 +51,28 @@ static bool xgi_validate_signal(struct drm_map * map)
 
 		/* Check RO channel */
 		DRM_WRITE8(map, 0x235c, 0x83);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
 			return FALSE;
 		}
 
 		/* Check RW channel */
 		DRM_WRITE8(map, 0x235c, 0x88);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
 			return FALSE;
 		}
 
 		/* Check RO channel outstanding */
 		DRM_WRITE8(map, 0x235c, 0x8f);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if (0 != (check & 0x3ff)) {
 			return FALSE;
 		}
 
 		/* Check RW channel outstanding */
 		DRM_WRITE8(map, 0x235c, 0x90);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if (0 != (check & 0x3ff)) {
 			return FALSE;
 		}
@ -89,7 +89,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 	int time_out = 0xffff;
 
 	DRM_WRITE8(map, 0xb057, 8);
-	while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) {
+	while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
 		while (0 != ((--time_out) & 0xfff))
 			/* empty */ ;
 
@ -100,7 +100,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 			u8 old_36;
 
 			DRM_INFO("Can not reset back 0x%x!\n",
-				 DRM_READ32(map, 0x2800));
+				 le32_to_cpu(DRM_READ32(map, 0x2800)));
 
 			DRM_WRITE8(map, 0xb057, 0);
 
@ -137,7 +137,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 
 bool xgi_ge_irq_handler(struct xgi_info * info)
 {
-	const u32 int_status = DRM_READ32(info->mmio_map, 0x2810);
+	const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
 	bool is_support_auto_reset = FALSE;
 
 	/* Check GE on/off */
@ -146,7 +146,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
 		/* We got GE stall interrupt.
 		 */
 		DRM_WRITE32(info->mmio_map, 0x2810,
-			    int_status | 0x04000000);
+			    cpu_to_le32(int_status | 0x04000000));
 
 		if (is_support_auto_reset) {
 			static cycles_t last_tick;
@ -176,7 +176,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
 	} else if (0 != (0x1 & int_status)) {
 		s_invalid_begin++;
 		DRM_WRITE32(info->mmio_map, 0x2810,
-			    (int_status & ~0x01) | 0x04000000);
+			    cpu_to_le32((int_status & ~0x01) | 0x04000000));
 	}
 
 	return TRUE;
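The read paths get the mirror-image treatment: raw register values from DRM_READ16()/DRM_READ32() are converted with le16_to_cpu()/le32_to_cpu() before CPU-order masks are applied. A sketch of the read-back comparison pattern used above; the register offset is taken from the hunk, the wrapper function itself is illustrative:

/* Sketch: byte-swap the raw register value first, then test the
 * nibble pair in CPU byte order. Returns nonzero when they agree. */
static int example_check(struct drm_map *map)
{
	u16 check = le16_to_cpu(DRM_READ16(map, 0x2360));

	return (check & 0x0f) == ((check & 0xf0) >> 4);
}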
@ -640,6 +640,7 @@ struct drm_set_version {
 #define DRM_FENCE_FLAG_SHAREABLE     0x00000002
 #define DRM_FENCE_FLAG_WAIT_LAZY     0x00000004
 #define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
+#define DRM_FENCE_FLAG_NO_USER       0x00000010
 
 /* Reserved for driver use */
 #define DRM_FENCE_MASK_DRIVER        0xFF000000
@ -648,12 +649,14 @@ struct drm_set_version {
 
 struct drm_fence_arg {
 	unsigned int handle;
-	unsigned int class;
+	unsigned int fence_class;
 	unsigned int type;
 	unsigned int flags;
 	unsigned int signaled;
+	unsigned int error;
+	unsigned int sequence;
 	unsigned int pad64;
-	uint64_t expand_pad[3]; /*Future expansion */
+	uint64_t expand_pad[2]; /*Future expansion */
 };
 
 /* Buffer permissions, referring to how the GPU uses the buffers.
@ -752,13 +755,6 @@ struct drm_fence_arg {
 #define DRM_BO_INIT_MINOR 1
 
 
-enum drm_bo_type {
-	drm_bo_type_dc,
-	drm_bo_type_user,
-	drm_bo_type_fake,
-	drm_bo_type_kernel, /* for initial kernel allocations */
-};
-
 struct drm_bo_info_req {
 	uint64_t mask;
 	uint64_t flags;
@ -774,8 +770,6 @@ struct drm_bo_create_req {
 	uint64_t buffer_start;
 	unsigned int hint;
 	unsigned int page_alignment;
-	enum drm_bo_type type;
-	unsigned int pad64;
 };
 
 struct drm_bo_op_req {
@ -1062,7 +1056,6 @@ struct drm_mode_mode_cmd {
 #define DRM_IOCTL_MM_UNLOCK		DRM_IOWR(0xc3, struct drm_mm_type_arg)
 
 #define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_DESTROY         DRM_IOWR(0xc5, struct drm_fence_arg)
 #define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
 #define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
 #define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
@ -1072,7 +1065,6 @@ struct drm_mode_mode_cmd {
 #define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)
 
 #define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
-#define DRM_IOCTL_BO_DESTROY            DRM_IOWR(0xce, struct drm_bo_handle_arg)
 #define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
 #define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
 #define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
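With the new error and sequence members in struct drm_fence_arg, userspace can learn not just that a fence signalled but whether the commands it covered failed. A hedged userspace sketch against the (unchanged) DRM_IOCTL_FENCE_SIGNALED ioctl; the header include, function name, and the convention that a nonzero error means the fence completed abnormally are assumptions on top of this patch:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Sketch: poll a fence and check the new error member. Returns 1 if
 * signaled, 0 if pending, negative errno if the ioctl itself failed,
 * and the driver-defined fence error if one was recorded. */
static int example_fence_signaled(int fd, unsigned int handle)
{
	struct drm_fence_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
		return -errno;
	if (arg.error)
		return (int) arg.error;	/* completed with an error */
	return arg.signaled ? 1 : 0;
}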
@ -481,261 +481,6 @@
 0x10DE 0x009D NV40 "NVidia Quadro FX 4500"
 0x10DE 0x009E NV40 "NVidia 0x009E"
 
-[nouveau]
-0x10de 0x0020 NV_04 "RIVA TNT"
-0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro"
-0x10de 0x0029 NV_04 "RIVA TNT2 Ultra"
-0x10de 0x002a NV_04 "Riva TnT2"
-0x10de 0x002b NV_04 "Riva TnT2"
-0x10de 0x002c NV_04 "Vanta/Vanta LT"
-0x10de 0x002d NV_04 "RIVA TNT2 Model 64/Model 64 Pro"
-0x10de 0x002e NV_04 "Vanta"
-0x10de 0x002f NV_04 "Vanta"
-0x10de 0x0040 NV_40 "GeForce 6800 Ultra"
-0x10de 0x0041 NV_40 "GeForce 6800"
-0x10de 0x0042 NV_40 "GeForce 6800 LE"
-0x10de 0x0043 NV_40 "NV40.3"
-0x10de 0x0044 NV_40 "GeForce 6800 XT"
-0x10de 0x0045 NV_40 "GeForce 6800 GT"
-0x10de 0x0046 NV_40 "GeForce 6800 GT"
-0x10de 0x0047 NV_40 "GeForce 6800 GS"
-0x10de 0x0048 NV_40 "GeForce 6800 XT"
-0x10de 0x0049 NV_40 "NV40GL"
-0x10de 0x004d NV_40 "Quadro FX 4000"
-0x10de 0x004e NV_40 "Quadro FX 4000"
-0x10de 0x0090 NV_40 "GeForce 7800 GTX"
-0x10de 0x0091 NV_40 "GeForce 7800 GTX"
-0x10de 0x0092 NV_40 "GeForce 7800 GT"
-0x10de 0x0093 NV_40 "GeForce 7800 GS"
-0x10de 0x0095 NV_40 "GeForce 7800 SLI"
-0x10de 0x0098 NV_40 "GeForce Go 7800"
-0x10de 0x0099 NV_40 "GeForce Go 7800 GTX"
-0x10de 0x009d NV_40 "Quadro FX4500"
-0x10de 0x00a0 NV_04 "Aladdin TNT2"
-0x10de 0x00c0 NV_40 "GeForce 6800 GS"
-0x10de 0x00c1 NV_40 "GeForce 6800"
-0x10de 0x00c2 NV_40 "GeForce 6800 LE"
-0x10de 0x00c3 NV_40 "Geforce 6800 XT"
-0x10de 0x00c8 NV_40 "GeForce Go 6800"
-0x10de 0x00c9 NV_40 "GeForce Go 6800 Ultra"
-0x10de 0x00cc NV_40 "Quadro FX Go1400"
-0x10de 0x00cd NV_40 "Quadro FX 3450/4000 SDI"
-0x10de 0x00ce NV_40 "Quadro FX 1400"
-0x10de 0x00f0 NV_40 "GeForce 6800/GeForce 6800 Ultra"
-0x10de 0x00f1 NV_40 "GeForce 6600/GeForce 6600 GT"
-0x10de 0x00f2 NV_40 "GeForce 6600/GeForce 6600 GT"
-0x10de 0x00f3 NV_40 "GeForce 6200"
-0x10de 0x00f4 NV_40 "GeForce 6600 LE"
-0x10de 0x00f5 NV_40 "GeForce 7800 GS"
-0x10de 0x00f6 NV_40 "GeForce 6600 GS"
-0x10de 0x00f8 NV_40 "Quadro FX 3400/4400"
-0x10de 0x00f9 NV_40 "GeForce 6800 Ultra/GeForce 6800 GT"
-0x10de 0x00fa NV_30 "GeForce PCX 5750"
-0x10de 0x00fb NV_30 "GeForce PCX 5900"
-0x10de 0x00fc NV_30 "Quadro FX 330/GeForce PCX 5300"
-0x10de 0x00fd NV_30 "Quadro FX 330/Quadro NVS280"
-0x10de 0x00fe NV_30 "Quadro FX 1300"
-0x10de 0x00ff NV_17 "GeForce PCX 4300"
-0x10de 0x0100 NV_10 "GeForce 256 SDR"
-0x10de 0x0101 NV_10 "GeForce 256 DDR"
-0x10de 0x0103 NV_10 "Quadro"
-0x10de 0x0110 NV_11 "GeForce2 MX/MX 400"
-0x10de 0x0111 NV_11 "GeForce2 MX 100 DDR/200 DDR"
-0x10de 0x0112 NV_11 "GeForce2 Go"
-0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go"
-0x10de 0x0140 NV_40 "GeForce 6600 GT"
-0x10de 0x0141 NV_40 "GeForce 6600"
-0x10de 0x0142 NV_40 "GeForce 6600 LE"
-0x10de 0x0143 NV_40 "GeForce 6600 VE"
-0x10de 0x0144 NV_40 "GeForce Go 6600"
-0x10de 0x0145 NV_40 "GeForce 6610 XL"
-0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE"
-0x10de 0x0147 NV_40 "GeForce 6700 XL"
-0x10de 0x0148 NV_40 "GeForce Go 6600"
-0x10de 0x0149 NV_40 "GeForce Go 6600 GT"
-0x10de 0x014a NV_40 "Quadro NVS 440"
-0x10de 0x014c NV_40 "Quadro FX 550"
-0x10de 0x014d NV_17 "Quadro FX 550"
-0x10de 0x014e NV_40 "Quadro FX 540"
-0x10de 0x014f NV_40 "GeForce 6200"
-0x10de 0x0150 NV_15 "GeForce2 GTS/Pro"
-0x10de 0x0151 NV_15 "GeForce2 Ti"
-0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner"
-0x10de 0x0153 NV_15 "Quadro2 Pro"
-0x10de 0x0160 NV_44 "GeForce 6500"
-0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)"
-0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)"
-0x10de 0x0163 NV_44 "GeForce 6200 LE"
-0x10de 0x0164 NV_44 "GeForce Go 6200"
-0x10de 0x0165 NV_44 "Quadro NVS 285"
-0x10de 0x0166 NV_44 "GeForce Go 6400"
-0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache"
-0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache"
-0x10de 0x0169 NV_44 "GeForce 6250"
-0x10de 0x0170 NV_17 "GeForce4 MX 460"
-0x10de 0x0171 NV_17 "GeForce4 MX 440"
-0x10de 0x0172 NV_17 "GeForce4 MX 420"
-0x10de 0x0173 NV_17 "GeForce4 MX 440-SE"
-0x10de 0x0174 NV_17 "GeForce4 440 Go"
-0x10de 0x0175 NV_17 "GeForce4 420 Go"
-0x10de 0x0176 NV_17 "GeForce4 420 Go 32M"
-0x10de 0x0177 NV_17 "GeForce4 460 Go"
-0x10de 0x0178 NV_17 "Quadro4 550 XGL"
-0x10de 0x0179 NV_17 "GeForce4 420 Go 32M"
-0x10de 0x017a NV_17 "Quadro4 200/400 NVS"
-0x10de 0x017b NV_17 "Quadro4 550 XGL"
-0x10de 0x017c NV_17 "Quadro4 500 GoGL"
-0x10de 0x017d NV_17 "GeForce4 410 Go 16M"
-0x10de 0x0181 NV_17 "GeForce4 MX 440 AGP 8x"
-0x10de 0x0182 NV_17 "GeForce4 MX 440SE AGP 8x"
-0x10de 0x0183 NV_17 "GeForce4 MX 420 AGP 8x"
-0x10de 0x0185 NV_17 "GeForce4 MX 4000 AGP 8x"
-0x10de 0x0186 NV_17 "GeForce4 448 Go"
-0x10de 0x0187 NV_17 "GeForce4 488 Go"
-0x10de 0x0188 NV_17 "Quadro4 580 XGL"
-0x10de 0x018a NV_17 "Quadro4 NVS AGP 8x"
-0x10de 0x018b NV_17 "Quadro4 380 XGL"
-0x10de 0x018c NV_17 "Quadro NVS 50 PCI"
-0x10de 0x018d NV_17 "GeForce4 448 Go"
-0x10de 0x0191 NV_50 "GeForce 8800 GTX"
-0x10de 0x0193 NV_50 "GeForce 8800 GTS"
-0x10de 0x0194 NV_50 "GeForce 8800 Ultra"
-0x10de 0x019d NV_50 "Quadro FX 5600"
-0x10de 0x019e NV_50 "Quadro FX 4600"
-0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics"
-0x10de 0x01d1 NV_44 "GeForce 7300 LE"
-0x10de 0x01d3 NV_44 "Geforce 7300 SE"
-0x10de 0x01d6 NV_44 "GeForce Go 7200"
-0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300"
-0x10de 0x01d8 NV_44 "GeForce Go 7400"
-0x10de 0x01d9 NV_44 "GeForce Go 7400 GS"
-0x10de 0x01da NV_44 "Quadro NVS 110M"
-0x10de 0x01db NV_44 "Quadro NVS 120M"
-0x10de 0x01dc NV_44 "Quadro FX 350M"
-0x10de 0x01dd NV_44 "GeForce 7500 LE"
-0x10de 0x01de NV_44 "Quadro FX 350"
-0x10de 0x01df NV_44 "GeForce 7300 GS"
-0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU"
-0x10de 0x0200 NV_20 "GeForce3"
-0x10de 0x0201 NV_20 "GeForce3 Ti 200"
-0x10de 0x0202 NV_20 "GeForce3 Ti 500"
-0x10de 0x0203 NV_20 "Quadro DCC"
-0x10de 0x0211 NV_40 "GeForce 6800"
-0x10de 0x0212 NV_40 "GeForce 6800 LE"
-0x10de 0x0215 NV_40 "GeForce 6800 GT"
-0x10de 0x0218 NV_40 "GeForce 6800 XT"
-0x10de 0x0221 NV_44 "GeForce 6200"
-0x10de 0x0222 NV_44 "GeForce 6200 A-LE"
-0x10de 0x0240 NV_44 "GeForce 6150"
-0x10de 0x0241 NV_44 "GeForce 6150 LE"
-0x10de 0x0242 NV_44 "GeForce 6100"
-0x10de 0x0244 NV_44 "GeForce Go 6150"
-0x10de 0x0247 NV_44 "GeForce Go 6100"
-0x10de 0x0250 NV_25 "GeForce4 Ti 4600"
-0x10de 0x0251 NV_25 "GeForce4 Ti 4400"
-0x10de 0x0252 NV_25 "GeForce4 Ti"
-0x10de 0x0253 NV_25 "GeForce4 Ti 4200"
-0x10de 0x0258 NV_25 "Quadro4 900 XGL"
-0x10de 0x0259 NV_25 "Quadro4 750 XGL"
-0x10de 0x025b NV_25 "Quadro4 700 XGL"
-0x10de 0x0280 NV_25 "GeForce4 Ti 4800"
-0x10de 0x0281 NV_25 "GeForce4 Ti 4200 AGP 8x"
-0x10de 0x0282 NV_25 "GeForce4 Ti 4800 SE"
-0x10de 0x0286 NV_25 "GeForce4 Ti 4200 Go AGP 8x"
-0x10de 0x0288 NV_25 "Quadro4 980 XGL"
-0x10de 0x0289 NV_25 "Quadro4 780 XGL"
-0x10de 0x028c NV_25 "Quadro4 700 GoGL"
-0x10de 0x0290 NV_40 "GeForce 7900 GTX"
-0x10de 0x0291 NV_40 "GeForce 7900 GT"
-0x10de 0x0292 NV_40 "GeForce 7900 GS"
-0x10de 0x0298 NV_40 "GeForce Go 7900 GS"
-0x10de 0x0299 NV_40 "GeForce Go 7900 GTX"
-0x10de 0x029a NV_40 "Quadro FX 2500M"
-0x10de 0x029b NV_40 "Quadro FX 1500M"
-0x10de 0x029c NV_40 "Quadro FX 5500"
-0x10de 0x029d NV_40 "Quadro FX 3500"
-0x10de 0x029e NV_40 "Quadro FX 1500"
-0x10de 0x029f NV_40 "Quadro FX 4500 X2"
-0x10de 0x02a0 NV_20 "XGPU"
-0x10de 0x02e1 NV_40 "GeForce 7600 GS"
-0x10de 0x0300 NV_30 "GeForce FX"
-0x10de 0x0301 NV_30 "GeForce FX 5800 Ultra"
-0x10de 0x0302 NV_30 "GeForce FX 5800"
-0x10de 0x0308 NV_30 "Quadro FX 2000"
-0x10de 0x0309 NV_30 "Quadro FX 1000"
-0x10de 0x0311 NV_30 "GeForce FX 5600 Ultra"
-0x10de 0x0312 NV_30 "GeForce FX 5600"
-0x10de 0x0313 NV_30 "NV31"
-0x10de 0x0314 NV_30 "GeForce FX 5600XT"
-0x10de 0x0316 NV_30 "NV31M"
-0x10de 0x0317 NV_30 "NV31M Pro"
-0x10de 0x031a NV_30 "GeForce FX Go5600"
-0x10de 0x031b NV_30 "GeForce FX Go5650"
-0x10de 0x031d NV_30 "NV31GLM"
-0x10de 0x031e NV_30 "NV31GLM Pro"
-0x10de 0x031f NV_30 "NV31GLM Pro"
-0x10de 0x0320 NV_34 "GeForce FX 5200"
-0x10de 0x0321 NV_34 "GeForce FX 5200 Ultra"
-0x10de 0x0322 NV_34 "GeForce FX 5200"
-0x10de 0x0323 NV_34 "GeForce FX 5200LE"
-0x10de 0x0324 NV_34 "GeForce FX Go5200"
-0x10de 0x0325 NV_34 "GeForce FX Go5250"
-0x10de 0x0326 NV_34 "GeForce FX 5500"
-0x10de 0x0327 NV_34 "GeForce FX 5100"
-0x10de 0x0328 NV_34 "GeForce FX Go5200 32M/64M"
-0x10de 0x0329 NV_34 "GeForce FX Go5200"
-0x10de 0x032a NV_34 "Quadro NVS 280 PCI"
-0x10de 0x032b NV_34 "Quadro FX 500/600 PCI"
-0x10de 0x032c NV_34 "GeForce FX Go 5300"
-0x10de 0x032d NV_34 "GeForce FX Go5100"
-0x10de 0x032f NV_34 "NV34GL"
-0x10de 0x0330 NV_30 "GeForce FX 5900 Ultra"
-0x10de 0x0331 NV_30 "GeForce FX 5900"
-0x10de 0x0332 NV_30 "GeForce FX 5900XT"
-0x10de 0x0333 NV_30 "GeForce FX 5950 Ultra"
-0x10de 0x0334 NV_30 "GeForce FX 5900ZT"
-0x10de 0x0338 NV_30 "Quadro FX 3000"
-0x10de 0x033f NV_30 "Quadro FX 700"
-0x10de 0x0341 NV_30 "GeForce FX 5700 Ultra"
-0x10de 0x0342 NV_30 "GeForce FX 5700"
-0x10de 0x0343 NV_30 "GeForce FX 5700LE"
-0x10de 0x0344 NV_30 "GeForce FX 5700VE"
-0x10de 0x0345 NV_30 "NV36.5"
-0x10de 0x0347 NV_30 "GeForce FX Go5700"
-0x10de 0x0348 NV_30 "GeForce FX Go5700"
-0x10de 0x0349 NV_30 "NV36M Pro"
-0x10de 0x034b NV_30 "NV36MAP"
-0x10de 0x034c NV_30 "Quadro FX Go1000"
-0x10de 0x034e NV_30 "Quadro FX 1100"
-0x10de 0x034f NV_30 "NV36GL"
-0x10de 0x0391 NV_40 "GeForce 7600 GT"
-0x10de 0x0392 NV_40 "GeForce 7600 GS"
-0x10de 0x0393 NV_40 "GeForce 7300 GT"
-0x10de 0x0394 NV_40 "GeForce 7600 LE"
-0x10de 0x0395 NV_40 "GeForce 7300 GT"
-0x10de 0x0397 NV_40 "GeForce Go 7700"
-0x10de 0x0398 NV_40 "GeForce Go 7600"
-0x10de 0x0399 NV_40 "GeForce Go 7600 GT"
-0x10de 0x039a NV_40 "Quadro NVS 300M"
-0x10de 0x039b NV_40 "GeForce Go 7900 SE"
-0x10de 0x039c NV_40 "Quadro FX 550M"
-0x10de 0x039e NV_40 "Quadro FX 560"
-0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430"
-0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405"
-0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400"
-0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420"
-0x10de 0x0400 NV_50 "GeForce 8600 GTS"
-0x10de 0x0402 NV_50 "GeForce 8600 GT"
-0x10de 0x0421 NV_50 "GeForce 8500 GT"
-0x10de 0x0422 NV_50 "GeForce 8400 GS"
-0x10de 0x0423 NV_50 "GeForce 8300 GS"
-0x10de 0x0429 NV_50 "Quadro NVS 140"
-0x12d2 0x0020 NV_04 "TNT"
-0x12d2 0x0028 NV_04 "TNT2"
-0x12d2 0x0029 NV_04 "UTNT2"
-0x12d2 0x002c NV_04 "VTNT2"
-0x12d2 0x00a0 NV_04 "ITNT2"
-
 [xgi]
 0x18ca 0x2200 0 "XP5"
 0x18ca 0x0047 0 "XP10 / XG47"
@ -116,6 +116,10 @@ static int i915_initialize(struct drm_device * dev,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef I915_HAVE_BUFFER
|
||||||
|
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
|
||||||
|
#endif
|
||||||
|
|
||||||
dev_priv->sarea_priv = (drm_i915_sarea_t *)
|
dev_priv->sarea_priv = (drm_i915_sarea_t *)
|
||||||
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
|
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
|
||||||
|
|
||||||
@@ -694,6 +698,343 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	return 0;
 }

+#ifdef I915_HAVE_BUFFER
+struct i915_relocatee_info {
+	struct drm_buffer_object *buf;
+	unsigned long offset;
+	u32 *data_page;
+	unsigned page_offset;
+	struct drm_bo_kmap_obj kmap;
+	int is_iomem;
+};
+
+static void i915_dereference_buffers_locked(struct drm_buffer_object **buffers,
+					    unsigned num_buffers)
+{
+	while (num_buffers--)
+		drm_bo_usage_deref_locked(&buffers[num_buffers]);
+}
+
+int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
+		     struct drm_buffer_object **buffers,
+		     struct i915_relocatee_info *relocatee,
+		     uint32_t *reloc)
+{
+	unsigned index;
+	unsigned long new_cmd_offset;
+	u32 val;
+	int ret;
+
+	if (reloc[2] >= num_buffers) {
+		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
+		return -EINVAL;
+	}
+
+	new_cmd_offset = reloc[0];
+	if (!relocatee->data_page ||
+	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
+		drm_bo_kunmap(&relocatee->kmap);
+		relocatee->offset = new_cmd_offset;
+		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
+				  1, &relocatee->kmap);
+		if (ret) {
+			DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset);
+			return ret;
+		}
+
+		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
+						       &relocatee->is_iomem);
+		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
+	}
+
+	val = buffers[reloc[2]]->offset;
+	index = (reloc[0] - relocatee->page_offset) >> 2;
+
+	/* add in validate */
+	val = val + reloc[1];
+
+	relocatee->data_page[index] = val;
+	return 0;
+}
+
+int i915_process_relocs(struct drm_file *file_priv,
+			uint32_t buf_handle,
+			uint32_t *reloc_buf_handle,
+			struct i915_relocatee_info *relocatee,
+			struct drm_buffer_object **buffers,
+			uint32_t num_buffers)
+{
+	struct drm_device *dev = file_priv->head->dev;
+	struct drm_buffer_object *reloc_list_object;
+	uint32_t cur_handle = *reloc_buf_handle;
+	uint32_t *reloc_page;
+	int ret, reloc_is_iomem, reloc_stride;
+	uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset;
+	struct drm_bo_kmap_obj reloc_kmap;
+
+	memset(&reloc_kmap, 0, sizeof(reloc_kmap));
+
+	reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1);
+	if (!reloc_list_object)
+		return -EINVAL;
+
+	ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap);
+	if (ret) {
+		DRM_ERROR("Could not map relocation buffer.\n");
+		goto out;
+	}
+
+	reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
+	num_relocs = reloc_page[0] & 0xffff;
+
+	if ((reloc_page[0] >> 16) & 0xffff) {
+		DRM_ERROR("Unsupported relocation type requested\n");
+		goto out;
+	}
+
+	/* get next relocate buffer handle */
+	*reloc_buf_handle = reloc_page[1];
+	reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
+
+	DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, reloc_page[1]);
+
+	reloc_page_offset = 0;
+	reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t);
+	reloc_end = reloc_offset + (num_relocs * reloc_stride);
+
+	do {
+		next_offset = drm_bo_offset_end(reloc_offset, reloc_end);
+
+		do {
+			cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t);
+			ret = i915_apply_reloc(file_priv, num_buffers,
+					       buffers, relocatee, &reloc_page[cur_offset]);
+			if (ret)
+				goto out;
+
+			reloc_offset += reloc_stride;
+		} while (reloc_offset < next_offset);
+
+		drm_bo_kunmap(&reloc_kmap);
+
+		reloc_offset = next_offset;
+		if (reloc_offset != reloc_end) {
+			ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap);
+			if (ret) {
+				DRM_ERROR("Could not map relocation buffer.\n");
+				goto out;
+			}
+
+			reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem);
+			reloc_page_offset = reloc_offset & ~PAGE_MASK;
+		}
+
+	} while (reloc_offset != reloc_end);
+out:
+	drm_bo_kunmap(&reloc_kmap);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_bo_usage_deref_locked(&reloc_list_object);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/*
+ * Validate, add fence and relocate a block of bos from a userspace list
+ */
+int i915_validate_buffer_list(struct drm_file *file_priv,
+			      unsigned int fence_class, uint64_t data,
+			      struct drm_buffer_object **buffers,
+			      uint32_t *num_buffers)
+{
+	struct drm_i915_op_arg arg;
+	struct drm_bo_op_req *req = &arg.d.req;
+	struct drm_bo_arg_rep rep;
+	unsigned long next = 0;
+	int ret = 0;
+	unsigned buf_count = 0;
+	struct drm_device *dev = file_priv->head->dev;
+	uint32_t buf_reloc_handle, buf_handle;
+	struct i915_relocatee_info relocatee;
+
+	do {
+		if (buf_count >= *num_buffers) {
+			DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
+			ret = -EINVAL;
+			goto out_err;
+		}
+
+		buffers[buf_count] = NULL;
+
+		if (copy_from_user(&arg, (void __user *)(unsigned)data, sizeof(arg))) {
+			ret = -EFAULT;
+			goto out_err;
+		}
+
+		if (arg.handled) {
+			data = arg.next;
+			buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);
+			buf_count++;
+			continue;
+		}
+
+		rep.ret = 0;
+		if (req->op != drm_bo_validate) {
+			DRM_ERROR
+			    ("Buffer object operation wasn't \"validate\".\n");
+			rep.ret = -EINVAL;
+			goto out_err;
+		}
+
+		buf_handle = req->bo_req.handle;
+		buf_reloc_handle = arg.reloc_handle;
+
+		rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
+						 req->bo_req.fence_class,
+						 req->bo_req.flags,
+						 req->bo_req.mask,
+						 req->bo_req.hint,
+						 &rep.bo_info,
+						 &buffers[buf_count]);
+
+		if (rep.ret) {
+			DRM_ERROR("error on handle validate %d\n", rep.ret);
+			goto out_err;
+		}
+
+		next = arg.next;
+		arg.handled = 1;
+		arg.d.rep = rep;
+
+		if (copy_to_user((void __user *)(unsigned)data, &arg, sizeof(arg)))
+			return -EFAULT;
+
+		data = next;
+		buf_count++;
+
+		if (buf_reloc_handle) {
+			memset(&relocatee, 0, sizeof(relocatee));
+
+			relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
+			if (!relocatee.buf) {
+				DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
+				ret = -EINVAL;
+				goto out_err;
+			}
+
+			while (buf_reloc_handle) {
+				ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count);
+				if (ret) {
+					DRM_ERROR("process relocs failed\n");
+					break;
+				}
+			}
+
+			drm_bo_kunmap(&relocatee.kmap);
+			mutex_lock(&dev->struct_mutex);
+			drm_bo_usage_deref_locked(&relocatee.buf);
+			mutex_unlock(&dev->struct_mutex);
+
+			if (ret)
+				goto out_err;
+
+		}
+	} while (next != 0);
+	*num_buffers = buf_count;
+	return 0;
+out_err:
+	mutex_lock(&dev->struct_mutex);
+	i915_dereference_buffers_locked(buffers, buf_count);
+	mutex_unlock(&dev->struct_mutex);
+	*num_buffers = 0;
+	return (ret) ? ret : rep.ret;
+}
+
+static int i915_execbuffer(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+		dev_priv->sarea_priv;
+	struct drm_i915_execbuffer *exec_buf = data;
+	struct drm_i915_batchbuffer *batch = &exec_buf->batch;
+	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
+	int num_buffers;
+	int ret;
+	struct drm_buffer_object **buffers;
+	struct drm_fence_object *fence;
+
+	if (!dev_priv->allow_batchbuffer) {
+		DRM_ERROR("Batchbuffer ioctl disabled\n");
+		return -EINVAL;
+	}
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+							batch->num_cliprects *
+							sizeof(struct drm_clip_rect)))
+		return -EFAULT;
+
+	if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
+		return -EINVAL;
+
+	num_buffers = exec_buf->num_buffers;
+
+	buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER);
+	if (!buffers)
+		return -ENOMEM;
+
+	/* validate buffer list + fixup relocations */
+	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
+					buffers, &num_buffers);
+	if (ret)
+		goto out_free;
+
+	/* submit buffer */
+	batch->start = buffers[num_buffers-1]->offset;
+
+	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
+		  batch->start, batch->used, batch->num_cliprects);
+
+	ret = i915_dispatch_batchbuffer(dev, batch);
+	if (ret)
+		goto out_err0;
+
+	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+	/* fence */
+	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
+	if (ret)
+		goto out_err0;
+
+	if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
+		ret = drm_fence_add_user_object(file_priv, fence, fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE);
+		if (!ret) {
+			fence_arg->handle = fence->base.hash.key;
+			fence_arg->fence_class = fence->fence_class;
+			fence_arg->type = fence->type;
+			fence_arg->signaled = fence->signaled;
+		}
+	}
+	drm_fence_usage_deref_unlocked(&fence);
+out_err0:
+
+	/* handle errors */
+	mutex_lock(&dev->struct_mutex);
+	i915_dereference_buffers_locked(buffers, num_buffers);
+	mutex_unlock(&dev->struct_mutex);
+
+out_free:
+	drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER);
+
+	return ret;
+}
+#endif
+
 int i915_do_cleanup_pageflip(struct drm_device * dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
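The relocation buffers walked by i915_process_relocs() above use the layout documented later in this patch in i915_drm.h: a four-dword header (relocation type and count in dword 0, the handle of the next relocation buffer in dword 1) followed by four-dword type-0 records. A minimal userspace-side sketch of filling one such buffer, assuming it has been CPU-mapped as a uint32_t array; the helper names here are illustrative and not part of the patch:

/* Hypothetical sketch: lay out one type-0 relocation buffer as consumed
 * by i915_process_relocs() above. Only the dword layout (4-dword header,
 * 4-dword records) comes from the patch. */
#include <stdint.h>

#define I915_RELOC_HEADER  4
#define I915_RELOC_TYPE_0  0
#define I915_RELOC0_STRIDE 4

static void emit_reloc0(uint32_t *buf, unsigned idx,
                        uint32_t offset, uint32_t delta, uint32_t target)
{
    uint32_t *r = buf + I915_RELOC_HEADER + idx * I915_RELOC0_STRIDE;
    r[0] = offset;  /* offset into the command buffer to patch */
    r[1] = delta;   /* delta added to the target object's offset */
    r[2] = target;  /* index into the validated buffer list */
    r[3] = 0;       /* reserved */
}

static void finish_reloc_buffer(uint32_t *buf, uint16_t count,
                                uint32_t next_handle)
{
    buf[0] = ((uint32_t)I915_RELOC_TYPE_0 << 16) | count;
    buf[1] = next_handle;   /* 0 terminates the chain */
    buf[2] = buf[3] = 0;    /* spare */
}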
@@ -724,13 +1065,14 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *f

 	LOCK_TEST_WITH_RETURN(dev, file_priv);

-	if (param->planes & ~0x3) {
+	/* This is really planes */
+	if (param->pipes & ~0x3) {
 		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
-			  param->planes);
+			  param->pipes);
 		return -EINVAL;
 	}

-	i915_dispatch_flip(dev, param->planes, 0);
+	i915_dispatch_flip(dev, param->pipes, 0);

 	return 0;
 }
@@ -915,6 +1257,9 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+#ifdef I915_HAVE_BUFFER
+	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
+#endif
 };

 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -160,6 +160,7 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_VBLANK_SWAP	0x0f
 #define DRM_I915_MMIO		0x10
 #define DRM_I915_HWS_ADDR	0x11
+#define DRM_I915_EXECBUFFER	0x12

 #define DRM_IOCTL_I915_INIT	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)

@@ -177,12 +178,18 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_EXECBUFFER	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)

 /* Asynchronous page flipping:
  */
 typedef struct drm_i915_flip {
-	int planes;
+	/*
+	 * This is really talking about planes, and we could rename it
+	 * except for the fact that some of the duplicated i915_drm.h files
+	 * out there check for HAVE_I915_FLIP and so might pick up this
+	 * version.
+	 */
+	int pipes;
 } drm_i915_flip_t;

 /* Allow drivers to submit batchbuffers directly to hardware, relying

@@ -319,4 +326,40 @@ typedef struct drm_i915_hws_addr {
 	uint64_t addr;
 } drm_i915_hws_addr_t;

+/*
+ * Relocation header is 4 uint32_ts
+ * 0 - (16-bit relocation type << 16)| 16 bit reloc count
+ * 1 - buffer handle for another list of relocs
+ * 2-3 - spare.
+ */
+#define I915_RELOC_HEADER 4
+
+/*
+ * type 0 relocation has 4-uint32_t stride
+ * 0 - offset into buffer
+ * 1 - delta to add in
+ * 2 - index into buffer list
+ * 3 - reserved (for optimisations later).
+ */
+#define I915_RELOC_TYPE_0 0
+#define I915_RELOC0_STRIDE 4
+
+struct drm_i915_op_arg {
+	uint64_t next;
+	uint32_t reloc_handle;
+	int handled;
+	union {
+		struct drm_bo_op_req req;
+		struct drm_bo_arg_rep rep;
+	} d;
+
+};
+
+struct drm_i915_execbuffer {
+	uint64_t ops_list;
+	uint32_t num_buffers;
+	struct drm_i915_batchbuffer batch;
+	struct drm_fence_arg fence_arg;
+};
+
 #endif /* _I915_DRM_H_ */
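The new ioctl's ops_list field is a user pointer to a chain of struct drm_i915_op_arg records, one per buffer object, linked through the next field; i915_validate_buffer_list() above walks the chain until next is 0. A hypothetical userspace sketch of building such a chain; the field names come from the structs in this patch, but the drm_bo_op_req members (op, bo_req.handle) are only the ones the kernel side dereferences here, and their full definition lives in the shared buffer-object headers:

/* Hypothetical sketch of chaining drm_i915_op_arg records for
 * DRM_IOCTL_I915_EXECBUFFER; assumes the shared drm headers above
 * are included. */
#include <stdint.h>

static void chain_op_args(struct drm_i915_op_arg *args,
                          const uint32_t *bo_handles, unsigned n)
{
    unsigned i;

    for (i = 0; i < n; i++) {
        args[i].handled = 0;
        args[i].reloc_handle = 0;  /* or the first reloc buffer handle */
        args[i].d.req.op = drm_bo_validate;
        args[i].d.req.bo_req.handle = bo_handles[i];
        /* link to the next record; 0 terminates the list */
        args[i].next = (i + 1 < n) ?
            (uint64_t)(unsigned long)&args[i + 1] : 0;
    }
}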
@@ -56,15 +56,20 @@
  * 1.8: New ioctl for ARB_Occlusion_Query
  * 1.9: Usable page flipping and triple buffering
  * 1.10: Plane/pipe disentangling
+ * 1.11: TTM superioctl
  */
 #define DRIVER_MAJOR		1
 #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR		10
+#define DRIVER_MINOR		11
 #else
 #define DRIVER_MINOR		6
 #endif
 #define DRIVER_PATCHLEVEL	0

+#ifdef I915_HAVE_BUFFER
+#define I915_MAX_VALIDATE_BUFFERS 4096
+#endif
+
 struct drm_i915_ring_buffer {
 	int tail_mask;
 	unsigned long Start;

@@ -137,7 +142,9 @@ struct drm_i915_private {
 #endif
 #ifdef I915_HAVE_BUFFER
 	void *agp_iomap;
+	unsigned int max_validate_buffers;
 #endif

 	DRM_SPINTYPE swaps_lock;
 	struct drm_i915_vbl_swap vbl_swaps;
 	unsigned int swaps_pending;

@@ -284,7 +291,8 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
 #ifdef I915_HAVE_BUFFER
 /* i915_buffer.c */
 extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
-extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+			    uint32_t *type);
 extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
 			      struct drm_mem_type_manager *man);
@@ -704,7 +704,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		return -EBUSY;
 	}

-	vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER);
+	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

 	if (!vbl_swap) {
 		DRM_ERROR("Failed to allocate memory to queue swap\n");
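The one-liner above fixes a classic C allocation bug: sizeof(vbl_swap) is the size of the pointer variable, not of the record it points to, so the queued swap entry was under-allocated. A reduced, self-contained illustration, not driver code (drm_calloc() behaves like calloc() for this purpose):

/* Reduced illustration of the allocation bug fixed above. */
#include <stdlib.h>

struct vbl_swap { unsigned int pipe, sequence; };

static struct vbl_swap *alloc_swap(void)
{
    struct vbl_swap *s;

    /* Buggy form: sizeof(s) is the pointer size (4 or 8 bytes), so the
     * record would be under-allocated:
     *     s = calloc(1, sizeof(s));
     */
    s = calloc(1, sizeof(*s)); /* correct: size of the pointed-to struct */
    return s;
}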
|
@ -123,12 +123,9 @@ enum nouveau_card_type {
|
||||||
NV_05 =5,
|
NV_05 =5,
|
||||||
NV_10 =10,
|
NV_10 =10,
|
||||||
NV_11 =11,
|
NV_11 =11,
|
||||||
NV_15 =11,
|
|
||||||
NV_17 =17,
|
NV_17 =17,
|
||||||
NV_20 =20,
|
NV_20 =20,
|
||||||
NV_25 =20,
|
|
||||||
NV_30 =30,
|
NV_30 =30,
|
||||||
NV_34 =30,
|
|
||||||
NV_40 =40,
|
NV_40 =40,
|
||||||
NV_44 =44,
|
NV_44 =44,
|
||||||
NV_50 =50,
|
NV_50 =50,
|
||||||
|
|
|
@@ -120,8 +120,9 @@ struct nouveau_channel
 	struct nouveau_gpuobj_ref *ramfc;

 	/* PGRAPH context */
+	/* XXX may be merge 2 pointers as private data ??? */
 	struct nouveau_gpuobj_ref *ramin_grctx;
-	uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */
+	void *pgraph_ctx;

 	/* NV50 VM */
 	struct nouveau_gpuobj *vm_pd;

@@ -490,21 +491,13 @@ extern int nv10_graph_load_context(struct nouveau_channel *);
 extern int nv10_graph_save_context(struct nouveau_channel *);

 /* nv20_graph.c */
-extern void nouveau_nv20_context_switch(struct drm_device *);
-extern int nv20_graph_init(struct drm_device *);
-extern void nv20_graph_takedown(struct drm_device *);
 extern int nv20_graph_create_context(struct nouveau_channel *);
 extern void nv20_graph_destroy_context(struct nouveau_channel *);
 extern int nv20_graph_load_context(struct nouveau_channel *);
 extern int nv20_graph_save_context(struct nouveau_channel *);
+extern int nv20_graph_init(struct drm_device *);
+extern void nv20_graph_takedown(struct drm_device *);
-
-/* nv30_graph.c */
 extern int nv30_graph_init(struct drm_device *);
-extern void nv30_graph_takedown(struct drm_device *);
-extern int nv30_graph_create_context(struct nouveau_channel *);
-extern void nv30_graph_destroy_context(struct nouveau_channel *);
-extern int nv30_graph_load_context(struct nouveau_channel *);
-extern int nv30_graph_save_context(struct nouveau_channel *);

 /* nv40_graph.c */
 extern int nv40_graph_init(struct drm_device *);
@@ -403,7 +403,19 @@ void nouveau_fifo_free(struct nouveau_channel *chan)

 	/* disable the fifo caches */
 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
+	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
+	NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
+	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
+
+	/* stop the fifo, otherwise it could be running and
+	 * it will crash when removing gpu objects */
+	if (dev_priv->card_type < NV_50) {
+		NV_WRITE(NV03_FIFO_REGS_DMAPUT(chan->id), chan->pushbuf_base);
+		NV_WRITE(NV03_FIFO_REGS_DMAGET(chan->id), chan->pushbuf_base);
+	} else {
+		NV_WRITE(NV50_FIFO_REGS_DMAPUT(chan->id), chan->pushbuf_base);
+		NV_WRITE(NV50_FIFO_REGS_DMAGET(chan->id), chan->pushbuf_base);
+	}
 	// FIXME XXX needs more code

 	engine->fifo.destroy_context(chan);

@@ -412,6 +424,10 @@ void nouveau_fifo_free(struct nouveau_channel *chan)
 	engine->graph.destroy_context(chan);

 	/* reenable the fifo caches */
+	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+		 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+	NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
+	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);

 	/* Deallocate push buffer */
@@ -35,8 +35,10 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
+#include "nouveau_swmthd.h"

-void nouveau_irq_preinstall(struct drm_device *dev)
+void
+nouveau_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

@@ -44,7 +46,8 @@ void nouveau_irq_preinstall(struct drm_device *dev)
 	NV_WRITE(NV03_PMC_INTR_EN_0, 0);
 }

-void nouveau_irq_postinstall(struct drm_device *dev)
+void
+nouveau_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

@@ -52,7 +55,8 @@ void nouveau_irq_postinstall(struct drm_device *dev)
 	NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
 }

-void nouveau_irq_uninstall(struct drm_device *dev)
+void
+nouveau_irq_uninstall(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

@@ -60,125 +64,86 @@ void nouveau_irq_uninstall(struct drm_device *dev)
 	NV_WRITE(NV03_PMC_INTR_EN_0, 0);
 }

-static void nouveau_fifo_irq_handler(struct drm_device *dev)
+static void
+nouveau_fifo_irq_handler(struct drm_device *dev)
 {
-	uint32_t status, chmode, chstat, channel;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t status;

-	status = NV_READ(NV03_PFIFO_INTR_0);
-	if (!status)
-		return;
+	while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
+		uint32_t chid, get;

-	chmode = NV_READ(NV04_PFIFO_MODE);
-	chstat = NV_READ(NV04_PFIFO_DMA);
-	channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
+		NV_WRITE(NV03_PFIFO_CACHES, 0);
+
+		chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
+			(nouveau_fifo_number(dev) - 1);
+		get = NV_READ(NV03_PFIFO_CACHE1_GET);

 	if (status & NV_PFIFO_INTR_CACHE_ERROR) {
-		uint32_t c1get, c1method, c1data;
+			uint32_t mthd, data;
+			int ptr;

-		DRM_ERROR("PFIFO error interrupt\n");
-
-		c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2;
+			ptr = get >> 2;
 		if (dev_priv->card_type < NV_40) {
-			/* Untested, so it may not work.. */
-			c1method = NV_READ(NV04_PFIFO_CACHE1_METHOD(c1get));
-			c1data = NV_READ(NV04_PFIFO_CACHE1_DATA(c1get));
+				mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr));
 		} else {
-			c1method = NV_READ(NV40_PFIFO_CACHE1_METHOD(c1get));
-			c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get));
+				mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr));
 		}

-		DRM_ERROR("Channel %d/%d - Method 0x%04x, Data 0x%08x\n",
-			  channel, (c1method >> 13) & 7, c1method & 0x1ffc,
-			  c1data);
+			DRM_INFO("PFIFO_CACHE_ERROR - "
+				 "Ch %d/%d Mthd 0x%04x Data 0x%08x\n",
+				 chid, (mthd >> 13) & 7, mthd & 0x1ffc, data);
+
+			NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4);
+			NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1);

 		status &= ~NV_PFIFO_INTR_CACHE_ERROR;
 		NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 	}

 	if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-		DRM_ERROR("PFIFO DMA pusher interrupt: ch%d, 0x%08x\n",
-			  channel, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
+			DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid);

 		status &= ~NV_PFIFO_INTR_DMA_PUSHER;
 		NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);

 		NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
-		if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)!=NV_READ(NV04_PFIFO_CACHE1_DMA_GET))
-		{
-			uint32_t getval=NV_READ(NV04_PFIFO_CACHE1_DMA_GET)+4;
-			NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET,getval);
-		}
+			if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get)
+				NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4);
 	}

 	if (status) {
-		DRM_ERROR("Unhandled PFIFO interrupt: status=0x%08x\n", status);
+			DRM_INFO("Unhandled PFIFO_INTR - 0x%8x\n", status);
 		NV_WRITE(NV03_PFIFO_INTR_0, status);
 	}

+		NV_WRITE(NV03_PFIFO_CACHES, 1);
+	}
+
 	NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
 }

-#if 0
-static void nouveau_nv04_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t channel,i;
-	uint32_t max=0;
-	NV_WRITE(NV04_PGRAPH_FIFO,0x0);
-	channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
-	//DRM_INFO("raw PFIFO_CACH1_PHS1 reg is %x\n",NV_READ(NV03_PFIFO_CACHE1_PUSH1));
-	//DRM_INFO("currently on channel %d\n",channel);
-	for (i=0;i<nouveau_fifo_number(dev);i++)
-		if ((dev_priv->fifos[i].used)&&(i!=channel)) {
-			uint32_t put,get,pending;
-			//put=NV_READ(dev_priv->ramfc_offset+i*32);
-			//get=NV_READ(dev_priv->ramfc_offset+4+i*32);
-			put=NV_READ(NV03_FIFO_REGS_DMAPUT(i));
-			get=NV_READ(NV03_FIFO_REGS_DMAGET(i));
-			pending=NV_READ(NV04_PFIFO_DMA);
-			//DRM_INFO("Channel %d (put/get %x/%x)\n",i,put,get);
-			/* mark all pending channels as such */
-			if ((put!=get)&!(pending&(1<<i)))
-			{
-				pending|=(1<<i);
-				NV_WRITE(NV04_PFIFO_DMA,pending);
-			}
-			max++;
-		}
-	nouveau_wait_for_idle(dev);
-
-#if 1
-	/* 2-channel commute */
-	//		NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,channel|0x100);
-	if (channel==0)
-		channel=1;
-	else
-		channel=0;
-	//	dev_priv->cur_fifo=channel;
-	NV_WRITE(NV04_PFIFO_NEXT_CHANNEL,channel|0x100);
-#endif
-	//NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,max|0x100);
-	//NV_WRITE(0x2050,max|0x100);
-
-	NV_WRITE(NV04_PGRAPH_FIFO,0x1);
-
-}
-#endif
-
-
-struct nouveau_bitfield_names
-{
+struct nouveau_bitfield_names {
 	uint32_t mask;
 	const char * name;
 };

 static struct nouveau_bitfield_names nouveau_nstatus_names[] =
 {
-	{ NV03_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
-	{ NV03_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
-	{ NV03_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
-	{ NV03_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] =
+{
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
 };

 static struct nouveau_bitfield_names nouveau_nsource_names[] =

@@ -280,7 +245,7 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
 }

 static void
-nouveau_graph_dump_trap_info(struct drm_device *dev)
+nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t address;

@@ -303,57 +268,80 @@ nouveau_graph_dump_trap_info(struct drm_device *dev)
 	}
 	nsource = NV_READ(NV03_PGRAPH_NSOURCE);
 	nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
-	if (dev_priv->card_type < NV_50) {
+	if (dev_priv->card_type < NV_10) {
+		class = NV_READ(0x400180 + subc*4) & 0xFF;
+	} else if (dev_priv->card_type < NV_40) {
+		class = NV_READ(0x400160 + subc*4) & 0xFFF;
+	} else if (dev_priv->card_type < NV_50) {
 		class = NV_READ(0x400160 + subc*4) & 0xFFFF;
 	} else {
 		class = NV_READ(0x400814);
 	}

-	DRM_ERROR("nSource:");
+	DRM_INFO("%s - nSource:", id);
 	nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
 				     ARRAY_SIZE(nouveau_nsource_names));
 	printk(", nStatus:");
-	nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names,
-				     ARRAY_SIZE(nouveau_nstatus_names));
+	if (dev_priv->card_type < NV_10)
+		nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names,
+					     ARRAY_SIZE(nouveau_nstatus_names));
+	else
+		nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10,
+					     ARRAY_SIZE(nouveau_nstatus_names_nv10));
 	printk("\n");

-	DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n",
-		  channel, subc, class, method, data2, data);
+	DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n",
+		 id, channel, subc, class, method, data2, data);
 }

-static void nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status, nsource;
-
-	status = NV_READ(NV03_PGRAPH_INTR);
-	if (!status)
-		return;
-	nsource = NV_READ(NV03_PGRAPH_NSOURCE);
-
-	if (status & NV_PGRAPH_INTR_NOTIFY) {
-		DRM_DEBUG("PGRAPH notify interrupt\n");
-
-		nouveau_graph_dump_trap_info(dev);
-
-		status &= ~NV_PGRAPH_INTR_NOTIFY;
-		NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
-	}
-
-	if (status & NV_PGRAPH_INTR_ERROR) {
-		DRM_ERROR("PGRAPH error interrupt\n");
-
-		nouveau_graph_dump_trap_info(dev);
-
-		status &= ~NV_PGRAPH_INTR_ERROR;
-		NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
-	}
-
-	if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
-		DRM_DEBUG("PGRAPH context switch interrupt channel %x\n",channel);
-		switch(dev_priv->card_type)
-		{
+static inline void
+nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int handled = 0;
+
+	DRM_DEBUG("PGRAPH notify interrupt\n");
+	if (dev_priv->card_type == NV_04 &&
+	    (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+		uint32_t class, mthd;
+
+		/* NV4 (nvidia TNT 1) reports software methods with
+		 * PGRAPH NOTIFY ILLEGAL_MTHD
+		 */
+		mthd = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC;
+		class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF;
+		DRM_DEBUG("Got NV04 software method method %x for class %#x\n",
+			  mthd, class);
+
+		if (nouveau_sw_method_execute(dev, class, mthd)) {
+			DRM_ERROR("Unable to execute NV04 software method %x "
+				  "for object class %x. Please report.\n",
+				  mthd, class);
+		} else {
+			handled = 1;
+		}
+	}
+
+	if (!handled)
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY");
+}
+
+static inline void
+nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
+{
+	nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR");
+}
+
+static inline void
+nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t chid;
+
+	chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1);
+	DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid);
+
+	switch(dev_priv->card_type) {
 	case NV_04:
 	case NV_05:
 		nouveau_nv04_context_switch(dev);

@@ -363,28 +351,57 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev)
 	case NV_17:
 		nouveau_nv10_context_switch(dev);
 		break;
-	case NV_20:
-	case NV_30:
-		nouveau_nv20_context_switch(dev);
-		break;
 	default:
 		DRM_ERROR("Context switch not implemented\n");
 		break;
 	}
+}
+
+static void
+nouveau_pgraph_irq_handler(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t status;
+
+	while ((status = NV_READ(NV03_PGRAPH_INTR))) {
+		uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE);
+
+		if (status & NV_PGRAPH_INTR_NOTIFY) {
+			nouveau_pgraph_intr_notify(dev, nsource);
+
+			status &= ~NV_PGRAPH_INTR_NOTIFY;
+			NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
+		}
+
+		if (status & NV_PGRAPH_INTR_ERROR) {
+			nouveau_pgraph_intr_error(dev, nsource);
+
+			status &= ~NV_PGRAPH_INTR_ERROR;
+			NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+		}
+
+		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nouveau_pgraph_intr_context_switch(dev);

-		status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-	}
+			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			NV_WRITE(NV03_PGRAPH_INTR,
+				 NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		}

-	if (status) {
-		DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status);
-		NV_WRITE(NV03_PGRAPH_INTR, status);
-	}
+		if (status) {
+			DRM_INFO("Unhandled PGRAPH_INTR - 0x%8x\n", status);
+			NV_WRITE(NV03_PGRAPH_INTR, status);
+		}
+
+		if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
+			NV_WRITE(NV04_PGRAPH_FIFO, 1);
+	}

 	NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
 }

-static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+static void
+nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

@@ -397,7 +414,8 @@ static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
 	}
 }

-irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device*)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
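The reworked handlers above decode nsource/nstatus through nouveau_print_bitfield_names(), which simply walks a mask/name table (struct nouveau_bitfield_names) and prints the name of each set bit. Its body is not part of this hunk; a sketch of the idea, using printf in place of the kernel's printk:

/* Sketch of the mask/name decoding behind nouveau_print_bitfield_names();
 * the real function lives elsewhere in nouveau_irq.c. */
#include <stdio.h>
#include <stdint.h>

struct bitfield_name { uint32_t mask; const char *name; };

static void print_bitfield(uint32_t value,
                           const struct bitfield_name *tbl, int n)
{
    int i;

    for (i = 0; i < n; i++)
        if (value & tbl[i].mask)
            printf(" %s", tbl[i].name);
}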
@@ -430,7 +430,7 @@ int nouveau_mem_init(struct drm_device *dev)
 	sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone

 	if (drm_sg_alloc(dev, &sgreq)) {
-		DRM_ERROR("Unable to allocate %dMB of scatter-gather"
+		DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
			  " pages for PCI DMA!",sgreq.size>>20);
	} else {
		if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
@@ -33,20 +33,10 @@ int
 nouveau_notifier_init_channel(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int flags, ret;

-	/*TODO: PCI notifier blocks */
-#ifndef __powerpc__
-	if (dev_priv->agp_heap)
-		flags = NOUVEAU_MEM_AGP;
-	else
-#endif
-	if (dev_priv->pci_heap)
-		flags = NOUVEAU_MEM_PCI;
-	else
-		flags = NOUVEAU_MEM_FB;
-	flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE);
+	flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED |
+		 NOUVEAU_MEM_FB_ACCEPTABLE);

 	chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
						 (struct drm_file *)-2);
@@ -57,6 +57,7 @@
 # define NV50_FIFO_REGS_DMAGET(i)	(NV50_FIFO_REGS(i)+0x44)

 #define NV03_PMC_BOOT_0			0x00000000
+#define NV03_PMC_BOOT_1			0x00000004
 #define NV03_PMC_INTR_0			0x00000100
 #    define NV_PMC_INTR_0_PFIFO_PENDING	(1<< 8)
 #    define NV_PMC_INTR_0_PGRAPH_PENDING	(1<<12)

@@ -118,10 +119,14 @@
 #define NV10_PGRAPH_DEBUG_4		0x00400090
 #define NV03_PGRAPH_INTR		0x00400100
 #define NV03_PGRAPH_NSTATUS		0x00400104
-#    define NV03_PGRAPH_NSTATUS_STATE_IN_USE	(1<<23)
-#    define NV03_PGRAPH_NSTATUS_INVALID_STATE	(1<<24)
-#    define NV03_PGRAPH_NSTATUS_BAD_ARGUMENT	(1<<25)
-#    define NV03_PGRAPH_NSTATUS_PROTECTION_FAULT	(1<<26)
+#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE	(1<<11)
+#    define NV04_PGRAPH_NSTATUS_INVALID_STATE	(1<<12)
+#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT	(1<<13)
+#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT	(1<<14)
+#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE	(1<<23)
+#    define NV10_PGRAPH_NSTATUS_INVALID_STATE	(1<<24)
+#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT	(1<<25)
+#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT	(1<<26)
 #define NV03_PGRAPH_NSOURCE		0x00400108
 #    define NV03_PGRAPH_NSOURCE_NOTIFICATION	(1<< 0)
 #    define NV03_PGRAPH_NSOURCE_DATA_ERROR	(1<< 1)

@@ -286,10 +291,8 @@
 #define NV10_PGRAPH_DMA_PITCH		0x00400770
 #define NV10_PGRAPH_DVD_COLORFMT	0x00400774
 #define NV10_PGRAPH_SCALED_FORMAT	0x00400778
-#define NV10_PGRAPH_CHANNEL_CTX_TABLE	0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE	0x00400780
-#define NV10_PGRAPH_CHANNEL_CTX_SIZE	0x00400784
 #define NV20_PGRAPH_CHANNEL_CTX_POINTER	0x00400784
-#define NV10_PGRAPH_CHANNEL_CTX_POINTER	0x00400788
 #define NV20_PGRAPH_CHANNEL_CTX_XFER	0x00400788
 #define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD	0x00000001
 #define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE	0x00000002
@@ -192,11 +192,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fb.init		= nv10_fb_init;
 		engine->fb.takedown	= nv10_fb_takedown;
 		engine->graph.init	= nv30_graph_init;
-		engine->graph.takedown	= nv30_graph_takedown;
-		engine->graph.create_context	= nv30_graph_create_context;
-		engine->graph.destroy_context	= nv30_graph_destroy_context;
-		engine->graph.load_context	= nv30_graph_load_context;
-		engine->graph.save_context	= nv30_graph_save_context;
+		engine->graph.takedown	= nv20_graph_takedown;
+		engine->graph.create_context	= nv20_graph_create_context;
+		engine->graph.destroy_context	= nv20_graph_destroy_context;
+		engine->graph.load_context	= nv20_graph_load_context;
+		engine->graph.save_context	= nv20_graph_save_context;
 		engine->fifo.init	= nouveau_fifo_init;
 		engine->fifo.takedown	= nouveau_stub_takedown;
 		engine->fifo.create_context	= nv10_fifo_create_context;

@@ -283,6 +283,12 @@ nouveau_card_init(struct drm_device *dev)
 	ret = nouveau_init_card_mappings(dev);
 	if (ret) return ret;

+	/* Put the card in BE mode if it's not */
+	if (NV_READ(NV03_PMC_BOOT_1))
+		NV_WRITE(NV03_PMC_BOOT_1,0x00000001);
+
+	DRM_MEMORYBARRIER();
+
 	/* Determine exact chipset we're running on */
 	if (dev_priv->card_type < NV_10)
 		dev_priv->chipset = dev_priv->card_type;

@@ -403,19 +409,79 @@ int nouveau_firstopen(struct drm_device *dev)
 int nouveau_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_nouveau_private *dev_priv;
-
-	if (flags==NV_UNKNOWN)
-		return -EINVAL;
+	void __iomem *regs;
+	uint32_t reg0,reg1;
+	uint8_t architecture = 0;

 	dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
 	if (!dev_priv)
 		return -ENOMEM;

-	dev_priv->card_type=flags&NOUVEAU_FAMILY;
 	dev_priv->flags = flags & NOUVEAU_FLAGS;
 	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;

+	DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class);
+
+	/* Time to determine the card architecture */
+	regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);
+	if (!regs) {
+		DRM_ERROR("Could not ioremap to determine register\n");
+		return -ENOMEM;
+	}
+
+	reg0 = readl(regs+NV03_PMC_BOOT_0);
+	reg1 = readl(regs+NV03_PMC_BOOT_1);
+	if (reg1)
+		reg0=___swab32(reg0);
+
+	/* We're dealing with >=NV10 */
+	if ((reg0 & 0x0f000000) > 0 ) {
+		/* Bit 27-20 contain the architecture in hex */
+		architecture = (reg0 & 0xff00000) >> 20;
+	/* NV04 or NV05 */
+	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
+		architecture = 0x04;
+	}
+
+	iounmap(regs);
+
+	if (architecture >= 0x50) {
+		dev_priv->card_type = NV_50;
+	} else if (architecture >= 0x44) {
+		dev_priv->card_type = NV_44;
+	} else if (architecture >= 0x40) {
+		dev_priv->card_type = NV_40;
+	} else if (architecture >= 0x30) {
+		dev_priv->card_type = NV_30;
+	} else if (architecture >= 0x20) {
+		dev_priv->card_type = NV_20;
+	} else if (architecture >= 0x17) {
+		dev_priv->card_type = NV_17;
+	} else if (architecture >= 0x11) {
+		dev_priv->card_type = NV_11;
+	} else if (architecture >= 0x10) {
+		dev_priv->card_type = NV_10;
+	} else if (architecture >= 0x04) {
+		dev_priv->card_type = NV_04;
+	} else {
+		dev_priv->card_type = NV_UNKNOWN;
+	}
+
+	DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0);
+
+	if (dev_priv->card_type == NV_UNKNOWN) {
+		return -EINVAL;
+	}
+
+	/* Special flags */
+	if (dev->pci_device == 0x01a0) {
+		dev_priv->flags |= NV_NFORCE;
+	} else if (dev->pci_device == 0x01f0) {
+		dev_priv->flags |= NV_NFORCE2;
+	}
+
 	dev->dev_private = (void *)dev_priv;

 	return 0;
 }

@@ -423,6 +489,8 @@ void nouveau_lastclose(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

+	/* In the case of an error dev_priv may not be be allocated yet */
+	if (dev_priv && dev_priv->card_type) {
 	nouveau_card_takedown(dev);

 	if(dev_priv->fb_mtrr>0)

@@ -431,6 +499,7 @@ void nouveau_lastclose(struct drm_device *dev)
 		dev_priv->fb_mtrr=0;
 	}
+	}
 }

 int nouveau_unload(struct drm_device *dev)
 {
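The new probe above reads NV03_PMC_BOOT_0 and, for NV10 and later parts, takes bits 27-20 as the architecture byte that drives the card_type ladder. A self-contained worked example of that decode; the register value is made up for illustration, only the bit layout comes from the patch:

/* Worked example of the NV03_PMC_BOOT_0 decode above. The register value
 * is hypothetical; 0x42 would land in the NV_40 branch of the ladder. */
#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint32_t reg0 = 0x04200000; /* hypothetical NV40-family part */
    uint8_t architecture = 0;

    if ((reg0 & 0x0f000000) > 0)                 /* NV10 and later */
        architecture = (reg0 & 0xff00000) >> 20; /* bits 27-20 */
    else if ((reg0 & 0xff00fff0) == 0x20004000)
        architecture = 0x04;                     /* NV04/NV05 */

    assert(architecture == 0x42);
    return 0;
}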
@ -0,0 +1,193 @@
|
||||||
|
/*
|
||||||
|
* Copyright (C) 2007 Arthur Huillet.
|
||||||
|
*
|
||||||
|
* All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
* a copy of this software and associated documentation files (the
|
||||||
|
* "Software"), to deal in the Software without restriction, including
|
||||||
|
* without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
* permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
* the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice (including the
|
||||||
|
* next paragraph) shall be included in all copies or substantial
|
||||||
|
* portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||||
|
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Arthur Huillet <arthur.huillet AT free DOT fr>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"

/* TODO: add a "card_type" attribute */
typedef struct {
	uint32_t oclass; /* object class for this software method */
	uint32_t mthd;   /* method number */
	void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */
} nouveau_software_method_t;

/* This function handles the NV04 setcontext software methods.
 * One function for all because they are very similar. */
static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF;
	uint32_t value_to_set = 0, bit_to_set = 0;

	switch (oclass) {
	case 0x4a:
		switch (mthd) {
		case 0x188:
		case 0x18c:
			bit_to_set = 0;
			break;
		case 0x198:
			bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
			break;
		case 0x2fc:
			bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
			break;
		default:
			break;
		}
		break;
	case 0x5c:
		switch (mthd) {
		case 0x184:
			bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
			break;
		case 0x188:
		case 0x18c:
			bit_to_set = 0;
			break;
		case 0x198:
			bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
			break;
		case 0x2fc:
			bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
			break;
		}
		break;
	case 0x5f:
		switch (mthd) {
		case 0x184:
			bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/
			break;
		case 0x188:
			bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
			break;
		case 0x18c:
		case 0x190:
			bit_to_set = 0;
			break;
		case 0x19c:
			bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
			break;
		case 0x2fc:
			bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
			break;
		}
		break;
	case 0x61:
		switch (mthd) {
		case 0x188:
			bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
			break;
		case 0x18c:
		case 0x190:
			bit_to_set = 0;
			break;
		case 0x19c:
			bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
			break;
		case 0x2fc:
			bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
			break;
		}
		break;
	case 0x77:
		switch (mthd) {
		case 0x198:
			bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
			break;
		case 0x304:
			bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG*/
			break;
		}
		break;
	default:
		break;
	}

	value_to_set = NV_READ(0x00700000 | inst_loc << 4) | bit_to_set;

	/*RAMIN*/
	nouveau_wait_for_idle(dev);
	NV_WRITE(0x00700000 | inst_loc << 4, value_to_set);

	/*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/
	NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, value_to_set);

	/*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/
	NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set);
}

nouveau_software_method_t nouveau_sw_methods[] = {
	/* NV04 context software methods */
	{ 0x4a, 0x188, nouveau_NV04_setcontext_sw_method },
	{ 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method },
	{ 0x4a, 0x198, nouveau_NV04_setcontext_sw_method },
	{ 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method },
	{ 0x5c, 0x184, nouveau_NV04_setcontext_sw_method },
	{ 0x5c, 0x188, nouveau_NV04_setcontext_sw_method },
	{ 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method },
	{ 0x5c, 0x198, nouveau_NV04_setcontext_sw_method },
	{ 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method },
	{ 0x5f, 0x184, nouveau_NV04_setcontext_sw_method },
	{ 0x5f, 0x188, nouveau_NV04_setcontext_sw_method },
	{ 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method },
	{ 0x5f, 0x190, nouveau_NV04_setcontext_sw_method },
	{ 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method },
	{ 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method },
	{ 0x61, 0x188, nouveau_NV04_setcontext_sw_method },
	{ 0x61, 0x18c, nouveau_NV04_setcontext_sw_method },
	{ 0x61, 0x190, nouveau_NV04_setcontext_sw_method },
	{ 0x61, 0x19c, nouveau_NV04_setcontext_sw_method },
	{ 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method },
	{ 0x77, 0x198, nouveau_NV04_setcontext_sw_method },
	{ 0x77, 0x304, nouveau_NV04_setcontext_sw_method },
	/* terminator */
	{ 0x0, 0x0, NULL },
};

int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) {
	int i = 0;
	while (nouveau_sw_methods[i].method_code != NULL) {
		if (nouveau_sw_methods[i].oclass == oclass &&
		    nouveau_sw_methods[i].mthd == method) {
			nouveau_sw_methods[i].method_code(dev, oclass, method);
			return 0;
		}
		i++;
	}

	return 1;
}
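The table above gives the dispatcher a flat (class, method) to handler mapping, and nouveau_sw_method_execute returns 0 only when a handler actually ran. As a hedged sketch, this is roughly how an interrupt path could drive it; the helper name and its call site are illustrative assumptions, not part of this commit:

	/* Illustrative caller only: try the software-method table for a
	 * trapped method, and complain if nothing claimed it. */
	static void example_handle_trapped_mthd(struct drm_device *dev,
						uint32_t oclass, uint32_t mthd)
	{
		if (nouveau_sw_method_execute(dev, oclass, mthd) != 0)
			DRM_ERROR("unhandled sw method 0x%x on class 0x%x\n",
				  mthd, oclass);
	}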
@ -0,0 +1,34 @@
/*
 * Copyright (C) 2007 Arthur Huillet.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Arthur Huillet <arthur.huillet AT free DOT fr>
 */

int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */
@ -346,6 +346,10 @@ static uint32_t nv04_graph_ctx_regs [] = {
 };

+struct graph_state {
+	int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])];
+};
+
 void nouveau_nv04_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@ -414,12 +418,17 @@ void nouveau_nv04_context_switch(struct drm_device *dev)
 }

 int nv04_graph_create_context(struct nouveau_channel *chan) {
+	struct graph_state* pgraph_ctx;
 	DRM_DEBUG("nv04_graph_context_create %d\n", chan->id);

-	memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
+	chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
+						   DRM_MEM_DRIVER);
+
+	if (pgraph_ctx == NULL)
+		return -ENOMEM;
+
 	//dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
-	chan->pgraph_ctx[0] = 0x0001ffff;
+	pgraph_ctx->nv04[0] = 0x0001ffff;
 	/* is it really needed ??? */
 	//dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
 	//dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
@ -429,16 +438,21 @@ int nv04_graph_create_context(struct nouveau_channel *chan) {

 void nv04_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+
+	drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
+	chan->pgraph_ctx = NULL;
 }

 int nv04_graph_load_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
 	int i;

 	for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
-		NV_WRITE(nv04_graph_ctx_regs[i], chan->pgraph_ctx[i]);
+		NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);

 	return 0;
 }
@ -447,10 +461,11 @@ int nv04_graph_save_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
 	int i;

 	for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
-		chan->pgraph_ctx[i] = NV_READ(nv04_graph_ctx_regs[i]);
+		pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]);

 	return 0;
 }
@ -467,20 +482,22 @@ int nv04_graph_init(struct drm_device *dev) {
 	NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
 	NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

-	// check the context is big enough
-	if ( sizeof(nv04_graph_ctx_regs)>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
-		DRM_ERROR("pgraph_ctx too small\n");
-	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	NV_WRITE(NV04_PGRAPH_VALID1, 0);
+	NV_WRITE(NV04_PGRAPH_VALID2, 0);
+	/*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
 	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
-	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);
-	NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);
-	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31);
-	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 |
-		 (0x00D00000) |
-		 (1<<29) |
-		 (1<<31));
-	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);
+	/*1231C000 blob, 001 haiku*/
+	/*NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/*0x72111100 blob, 01 haiku*/
+	/*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/*haiku same*/
+	/*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x10d4ff31);
+	/*haiku and blob 10d4*/

 	NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF);
 	NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100);
@ -496,4 +513,3 @@ int nv04_graph_init(struct drm_device *dev) {
 void nv04_graph_takedown(struct drm_device *dev)
 {
 }
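The nv04_graph.c hunks above swap the fixed pgraph_ctx array embedded in the channel for a heap-allocated struct graph_state, so context create/destroy become an alloc/free pair. A minimal sketch of the resulting lifecycle, using the same drm_calloc/drm_free calls as the diff (the helper names here are illustrative):

	/* Sketch: per-channel PGRAPH context lifecycle after this change. */
	static int example_ctx_alloc(struct nouveau_channel *chan)
	{
		struct graph_state *ctx;

		/* drm_calloc zeroes the block, replacing the old memset */
		ctx = drm_calloc(1, sizeof(*ctx), DRM_MEM_DRIVER);
		if (ctx == NULL)
			return -ENOMEM;
		chan->pgraph_ctx = ctx;
		return 0;
	}

	static void example_ctx_free(struct nouveau_channel *chan)
	{
		drm_free(chan->pgraph_ctx, sizeof(struct graph_state), DRM_MEM_DRIVER);
		chan->pgraph_ctx = NULL; /* destroy must clear the stale pointer */
	}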
@ -42,244 +42,6 @@ struct pipe_state {
 	uint32_t pipe_0x7800[0x0c0/4];
 };

-/* TODO dynamic allocation ??? */
-static struct pipe_state pipe_state[NV10_FIFO_NUMBER];
-
-static void nv10_graph_save_pipe(struct nouveau_channel *chan) { /* ... */ }
-static void nv10_graph_load_pipe(struct nouveau_channel *chan) { /* ... */ }
-static void nv10_graph_create_pipe(struct nouveau_channel *chan) { /* ... */ }
 (removed bodies are identical to the versions re-added below, except that they
 looked the pipe up through the static pipe_state array at index chan->id)

 static int nv10_graph_ctx_regs [] = {
 	NV10_PGRAPH_CTX_SWITCH1,
 	NV10_PGRAPH_CTX_SWITCH2,
@ -623,20 +385,269 @@ NV10_PGRAPH_DEBUG_4,
 	0x00400a04,
 };

+struct graph_state {
+	int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])];
+	int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])];
+	struct pipe_state pipe_state;
+};
+
+static void nv10_graph_save_pipe(struct nouveau_channel *chan) {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	int i;
+#define PIPE_SAVE(addr) \
+	do { \
+		NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
+		for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
+			fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \
+	} while (0)
+
+	PIPE_SAVE(0x4400);
+	PIPE_SAVE(0x0200);
+	PIPE_SAVE(0x6400);
+	PIPE_SAVE(0x6800);
+	PIPE_SAVE(0x6c00);
+	PIPE_SAVE(0x7000);
+	PIPE_SAVE(0x7400);
+	PIPE_SAVE(0x7800);
+	PIPE_SAVE(0x0040);
+	PIPE_SAVE(0x0000);
+
+#undef PIPE_SAVE
+}
+
+static void nv10_graph_load_pipe(struct nouveau_channel *chan) {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	int i;
+	uint32_t xfmode0, xfmode1;
+#define PIPE_RESTORE(addr) \
+	do { \
+		NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
+		for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
+			NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
+	} while (0)
+
+	nouveau_wait_for_idle(dev);
+	/* XXX check haiku comments */
+	xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0);
+	xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1);
+	NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000);
+	NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
+	NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+	PIPE_RESTORE(0x0200);
+	nouveau_wait_for_idle(dev);
+
+	/* restore XFMODE */
+	NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0);
+	NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(0x6400);
+	PIPE_RESTORE(0x6800);
+	PIPE_RESTORE(0x6c00);
+	PIPE_RESTORE(0x7000);
+	PIPE_RESTORE(0x7400);
+	PIPE_RESTORE(0x7800);
+	PIPE_RESTORE(0x4400);
+	PIPE_RESTORE(0x0000);
+	PIPE_RESTORE(0x0040);
+	nouveau_wait_for_idle(dev);
+
+#undef PIPE_RESTORE
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan) {
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+	uint32_t *fifo_pipe_state_addr;
+	int i;
+#define PIPE_INIT(addr) \
+	do { \
+		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+	} while (0)
+#define PIPE_INIT_END(addr) \
+	do { \
+		if (fifo_pipe_state_addr != \
+				sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \
+			DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \
+					sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \
+	} while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+
+	PIPE_INIT(0x0200);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0200);
+
+	PIPE_INIT(0x6400);
+	for (i = 0; i < 211; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	PIPE_INIT_END(0x6400);
+
+	PIPE_INIT(0x6800);
+	for (i = 0; i < 162; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	for (i = 0; i < 25; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6800);
+
+	PIPE_INIT(0x6c00);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0xbf800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6c00);
+
+	PIPE_INIT(0x7000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	for (i = 0; i < 35; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7000);
+
+	PIPE_INIT(0x7400);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7400);
+
+	PIPE_INIT(0x7800);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7800);
+
+	PIPE_INIT(0x4400);
+	for (i = 0; i < 32; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x4400);
+
+	PIPE_INIT(0x0000);
+	for (i = 0; i < 16; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0000);
+
+	PIPE_INIT(0x0040);
+	for (i = 0; i < 4; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
 static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, j;
+	int i;
 	for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) {
 		if (nv10_graph_ctx_regs[i] == reg)
 			return i;
 	}
-	if (dev_priv->chipset>=0x17) {
-		for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) {
-			if (nv17_graph_ctx_regs[j] == reg)
-				return i;
-		}
-	}
+	DRM_ERROR("unknown offset nv10_ctx_regs %d\n", reg);
+	return -1;
+}
+
+static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+	int i;
+	for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) {
+		if (nv17_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	DRM_ERROR("unknown offset nv17_ctx_regs %d\n", reg);
 	return -1;
 }

@ -644,13 +655,14 @@ int nv10_graph_load_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, j;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+	int i;

 	for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
-		NV_WRITE(nv10_graph_ctx_regs[i], chan->pgraph_ctx[i]);
+		NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
 	if (dev_priv->chipset>=0x17) {
-		for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
-			NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]);
+		for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
+			NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]);
 	}

 	nv10_graph_load_pipe(chan);
@ -662,13 +674,14 @@ int nv10_graph_save_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, j;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+	int i;

 	for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
-		chan->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
+		pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]);
 	if (dev_priv->chipset>=0x17) {
-		for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
-			chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
+		for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
+			pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]);
 	}

 	nv10_graph_save_pipe(chan);
@ -700,7 +713,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev)
 	next = dev_priv->fifos[chid];

 	if (!next) {
-		DRM_DEBUG("Invalid next channel\n");
+		DRM_ERROR("Invalid next channel\n");
 		return;
 	}

@ -708,7 +721,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev)
 	last = dev_priv->fifos[chid];

 	if (!last) {
-		DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
+		DRM_INFO("WARNING: Invalid last channel, switch to %x\n",
 			  next->id);
 	} else {
 		DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
@ -737,16 +750,27 @@ void nouveau_nv10_context_switch(struct drm_device *dev)
 #define NV_WRITE_CTX(reg, val) do { \
 	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
 	if (offset > 0) \
-		chan->pgraph_ctx[offset] = val; \
+		pgraph_ctx->nv10[offset] = val; \
+	} while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+	if (offset > 0) \
+		pgraph_ctx->nv17[offset] = val; \
 	} while (0)

 int nv10_graph_create_context(struct nouveau_channel *chan) {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state* pgraph_ctx;
+
 	DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);

-	memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
+	chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
+						   DRM_MEM_DRIVER);
+
+	if (pgraph_ctx == NULL)
+		return -ENOMEM;
+
 	/* mmio trace suggests this should be done in the ddx with methods/objects */
 #if 0
@ -786,12 +810,12 @@ int nv10_graph_create_context(struct nouveau_channel *chan) {
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
 	if (dev_priv->chipset>=0x17) {
 		/* is it really needed ??? */
-		NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
-		NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
-		NV_WRITE_CTX(0x00400eac, 0x0fff0000);
-		NV_WRITE_CTX(0x00400eb0, 0x0fff0000);
-		NV_WRITE_CTX(0x00400ec0, 0x00000080);
-		NV_WRITE_CTX(0x00400ed0, 0x00000080);
+		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
+		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
 	}
 	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);

@ -803,9 +827,17 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct graph_state* pgraph_ctx = chan->pgraph_ctx;
 	int chid;

+	drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
+	chan->pgraph_ctx = NULL;
+
 	chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);

+	/* This code seems to corrupt the 3D pipe, but the blob seems to do similar things ????
+	 */
+#if 0
 	/* does this avoid a potential context switch while we are writing graph
 	 * regs, or should we mask the graph interrupt ???
 	 */
@ -814,10 +846,16 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
 		DRM_INFO("cleaning a channel with graph in current context\n");
 		nouveau_wait_for_idle(dev);
 		DRM_INFO("resetting current graph context\n");
-		nv10_graph_create_context(chan);
+		/* can't be called here because of dynamic mem alloc */
+		//nv10_graph_create_context(chan);
 		nv10_graph_load_context(chan);
 	}
 	NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
+#else
+	if (chid == chan->id) {
+		DRM_INFO("cleaning a channel with graph in current context\n");
+	}
+#endif
 }

 int nv10_graph_init(struct drm_device *dev) {
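With the pipe snapshot folded into the per-channel graph_state, an NV10 context switch reduces to saving one channel's register file and pipe, then loading another's. A rough sketch of the ordering implied by the functions above (nouveau_nv10_context_switch itself also handles PFIFO/PGRAPH enable bits and channel lookup, which are omitted here):

	/* Sketch: save-old/load-new ordering at a PGRAPH channel switch. */
	static void example_nv10_switch(struct nouveau_channel *last,
					struct nouveau_channel *next)
	{
		if (last)
			nv10_graph_save_context(last);	/* ctx regs + PIPE_SAVE blocks */
		nv10_graph_load_context(next);		/* ctx regs + nv10_graph_load_pipe */
	}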
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -37,6 +37,7 @@
 #define NV41_GRCTX_SIZE (92*1024)
 #define NV43_GRCTX_SIZE (70*1024)
 #define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */
+#define NV47_GRCTX_SIZE (125*1024)
 #define NV49_GRCTX_SIZE (164640)
 #define NV4A_GRCTX_SIZE (64*1024)
 #define NV4B_GRCTX_SIZE (164640)
@ -565,6 +566,136 @@ nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 		INSTANCE_WR(ctx, i/4, 0x3f800000);
 }

+/* This may only work on 7800 AGP cards, will include a warning */
+static void
+nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i;
+
+	INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start);
+	INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
+	INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
+	INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
+	INSTANCE_WR(ctx, 0x00000178/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x00000180/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
+	for (i=0x00000194; i<=0x000001b0; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
+	INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
+	INSTANCE_WR(ctx, 0x00000350/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x00000354/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x00000358/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
+	INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
+	for (i=0x000003c0; i<=0x000003fc; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00000111);
+	INSTANCE_WR(ctx, 0x00000454/4, 0x00000111);
+	INSTANCE_WR(ctx, 0x00000458/4, 0x00080060);
+	INSTANCE_WR(ctx, 0x00000474/4, 0x00000080);
+	INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000);
+	INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00000490/4, 0x46400000);
+	INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000);
+	for (i=0x000004a4; i<=0x000004e0; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x88888888);
+	INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000);
+	INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000);
+	INSTANCE_WR(ctx, 0x00000500/4, 0x00011100);
+	for (i=0x0000051c; i<=0x00000558; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x07ff0000);
+	INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff);
+	INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000);
+	INSTANCE_WR(ctx, 0x00000590/4, 0x70605040);
+	INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888);
+	INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8);
+	INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000);
+	INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6);
+	INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699);
+	INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98);
+	INSTANCE_WR(ctx, 0x00000608/4, 0x00000098);
+	INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff);
+	INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000);
+	INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000);
+	INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00);
+	for (i=0x000006b0; i<=0x000006ec; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00018488);
+	for (i=0x000006f0; i<=0x0000072c; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00028202);
+	for (i=0x00000770; i<=0x000007ac; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x0000aae4);
+	for (i=0x000007b0; i<=0x000007ec; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x01012000);
+	for (i=0x000007f0; i<=0x0000082c; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00080008);
+	for (i=0x00000870; i<=0x000008ac; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00100008);
+	INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80);
+	INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80);
+	INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80);
+	INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80);
+	INSTANCE_WR(ctx, 0x00000910/4, 0x00000202);
+	INSTANCE_WR(ctx, 0x00000914/4, 0x00000202);
+	INSTANCE_WR(ctx, 0x00000918/4, 0x00000202);
+	INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202);
+	for (i=0x00000930; i<=0x0000095c; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00000008);
+	INSTANCE_WR(ctx, 0x00000970/4, 0x00000002);
+	INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021);
+	INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3);
+	INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200);
+	INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff);
+	INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00);
+	INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000);
+	INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100);
+	INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001);
+	INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003);
+	INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001);
+	for (i=0x00000b10; i<=0x00000b8c; i+=4)
+		INSTANCE_WR(ctx, i/4, 0xffffffff);
+	INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005);
+	INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff);
+	for (i=0x00000bdc; i<=0x00000bf8; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00005555);
+	INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001);
+	INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000);
+	for (i=0x00003000; i<=0x00003078; i+=8)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x00004dc0; i<=0x00006fb0; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x00006fc0; i<=0x000073b0; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x00009800; i<=0x0000b9f0; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x0000ba00; i<=0x00010430; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x00010440; i<=0x00010830; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x00012c80; i<=0x00014e70; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x00014e80; i<=0x00015270; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x000176c0; i<=0x000198b0; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x000198c0; i<=0x00019cb0; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x0001c100; i<=0x0001e2f0; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x0001e300; i<=0x0001e6f0; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
 static void
 nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
@ -1361,6 +1492,11 @@ nv40_graph_create_context(struct nouveau_channel *chan)
 		ctx_size = NV46_GRCTX_SIZE;
 		ctx_init = nv46_graph_context_init;
 		break;
+	case 0x47:
+		DRM_INFO("NV47 warning: If your card behaves strangely, please come to the irc channel\n");
+		ctx_size = NV47_GRCTX_SIZE;
+		ctx_init = nv47_graph_context_init;
+		break;
 	case 0x49:
 		ctx_size = NV49_GRCTX_SIZE;
 		ctx_init = nv49_graph_context_init;
@ -1675,6 +1811,38 @@ static uint32_t nv46_ctx_voodoo[] = {
 	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
 };

+static uint32_t nv47_ctx_voodoo[] = {
+	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606,
+	0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+	0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+	0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+	0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+	0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+	0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+	0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+	0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12,
+	0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a,
+	0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b,
+	0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+	0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
+	0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901,
+	0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19,
+	0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00,
+	0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
+	0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000,
+	0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a,
+	0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006,
+	0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318,
+	0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a,
+	0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002,
+	0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f,
+	0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880,
+	0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c,
+	0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006,
+	0x0060000e, ~0
+};
+
 //this is used for nv49 and nv4b
 static uint32_t nv49_4b_ctx_voodoo[] ={
 	0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020,
@ -1835,6 +2003,7 @@ nv40_graph_init(struct drm_device *dev)
 	case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
 	case 0x44: ctx_voodoo = nv44_ctx_voodoo; break;
 	case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
+	case 0x47: ctx_voodoo = nv47_ctx_voodoo; break;
 	case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
 	case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
 	case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
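Read together, the nv40_graph.c hunks show the recipe this driver uses to light up graph context on an additional NV4x chipset: a GRCTX size define, an INSTANCE_WR-based initial context image, a ctx_voodoo table, and one case in each of the two chipset switches. A condensed, illustrative restatement for a hypothetical chipset, not code from the commit:

	/* Hypothetical NVxx enablement, mirroring the NV47 hunks above. */
	#define NVXX_GRCTX_SIZE (125*1024)	/* sized from mmio traces */

	static void
	nvXX_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private; /* used by INSTANCE_WR */

		/* first word points the context at its own instance memory */
		INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start);
		/* ...chipset-specific initial register image follows... */
	}
	/* plus: case 0xXX in nv40_graph_create_context() and in the
	 * ctx_voodoo switch inside nv40_graph_init() */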
@ -1861,6 +1861,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 		OUT_RING((image->width << 16) | height);
 		RADEON_WAIT_UNTIL_2D_IDLE();
 		ADVANCE_RING();
+		COMMIT_RING();

 		radeon_cp_discard_buffer(dev, buf);

@ -1878,6 +1879,8 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 	RADEON_FLUSH_CACHE();
 	RADEON_WAIT_UNTIL_2D_IDLE();
 	ADVANCE_RING();
+	COMMIT_RING();
+
 	return 0;
 }

@ -2401,7 +2404,6 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file

 	ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);

-	COMMIT_RING();
 	return ret;
 }
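The radeon hunks move ring commitment into radeon_cp_dispatch_texture itself: each upload pass now does ADVANCE_RING() followed by COMMIT_RING() before its buffer is discarded, and the now-redundant COMMIT_RING() in the radeon_cp_texture ioctl wrapper is dropped. For reference, a hedged sketch of the emit bracket these macros form (the dword count is illustrative, and the two RADEON_* macros expand to ring packets themselves):

	/* Sketch: the usual radeon CP submission bracket. */
	BEGIN_RING(4);			/* reserve space in the ring */
	RADEON_FLUSH_CACHE();		/* emits cache-flush packets */
	RADEON_WAIT_UNTIL_2D_IDLE();	/* emits a wait-until packet */
	ADVANCE_RING();			/* close this write bracket */
	COMMIT_RING();			/* publish the new write pointer to the CP */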
@ -206,7 +206,8 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,

 #ifdef VIA_HAVE_BUFFER
 extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
-extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
+			   uint32_t *type);
 extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
 			     struct drm_mem_type_manager *man);
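Finally, via_drv.h picks up the fence-class plumbing: the buffer-object fence hook now reports a fence class alongside the fence type. A hedged sketch of an implementation satisfying the new signature; VIA effectively has a single fence class, and the exact type flags returned by the real via_fence_types may differ:

	int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
			    uint32_t *type)
	{
		*fclass = 0;			/* single fence class assumed */
		*type = DRM_FENCE_TYPE_EXE;	/* minimal: command-execution fence */
		return 0;
	}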