Merge branch 'modesetting-gem'

main
Jesse Barnes 2008-12-09 10:23:43 -08:00
commit 6656db1055
244 changed files with 68490 additions and 4997 deletions

configure.ac

@ -117,6 +117,7 @@ AC_OUTPUT([
Makefile
libdrm/Makefile
libdrm/intel/Makefile
libdrm/radeon/Makefile
shared-core/Makefile
tests/Makefile
libdrm.pc])

libdrm/Makefile.am

@ -18,7 +18,7 @@
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-SUBDIRS = . intel
+SUBDIRS = . intel radeon
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
@ -26,9 +26,10 @@ libdrm_la_LDFLAGS = -version-number 2:4:0 -no-undefined
AM_CFLAGS = -I$(top_srcdir)/shared-core
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
-	libdrm_lists.h
+	xf86drmMode.c libdrm_lists.h
libdrmincludedir = ${includedir}
-libdrminclude_HEADERS = xf86drm.h
+libdrminclude_HEADERS = xf86drm.h xf86drmMode.h
EXTRA_DIST = ChangeLog TODO

libdrm/intel/intel_bufmgr.h

@ -66,6 +66,11 @@ struct _drm_intel_bo {
/** Buffer manager context associated with this buffer object */
drm_intel_bufmgr *bufmgr;
/**
* MM-specific handle for accessing object
*/
int handle;
};
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,

libdrm/intel/intel_bufmgr_fake.c

@ -833,7 +833,7 @@ drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
bo_fake->refcount = 1;
bo_fake->id = ++bufmgr_fake->buf_nr;
bo_fake->name = name;
-bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+bo_fake->flags = BM_PINNED;
bo_fake->is_static = 1;
DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
@ -1070,7 +1070,6 @@ drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
if (!(bo_fake->flags & BM_NO_BACKING_STORE))
bo_fake->dirty = 1;
}
}
static int

libdrm/intel/intel_bufmgr_gem.c

@ -352,6 +352,7 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
bo_gem->gem_handle = create.handle;
bo_gem->bo.handle = bo_gem->gem_handle;
if (ret != 0) {
free(bo_gem);
return NULL;
@ -425,6 +426,14 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
pthread_mutex_unlock(&bufmgr_gem->lock);
}
static void
dri_gem_bo_reference_locked(dri_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
bo_gem->refcount++;
}
static void
drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
{

libdrm/radeon/Makefile.am (new file, 48 lines)

@ -0,0 +1,48 @@
# Copyright © 2008 Jérôme Glisse
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jérôme Glisse <glisse@freedesktop.org>
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir)/libdrm \
-I$(top_srcdir)/libdrm/radeon \
$(PTHREADSTUBS_CFLAGS) \
-I$(top_srcdir)/shared-core
libdrm_radeon_la_LTLIBRARIES = libdrm-radeon.la
libdrm_radeon_ladir = $(libdir)
libdrm_radeon_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_radeon_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
libdrm_radeon_la_SOURCES = \
radeon_bo_gem.c \
radeon_cs_gem.c \
radeon_track.c
libdrm_radeonincludedir = ${includedir}/drm
libdrm_radeoninclude_HEADERS = \
radeon_bo.h \
radeon_cs.h \
radeon_bo_gem.h \
radeon_cs_gem.h \
radeon_track.h

libdrm/radeon/radeon_bo.h (new file, 167 lines)

@ -0,0 +1,167 @@
/*
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_BO_H
#define RADEON_BO_H
#include <stdio.h>
#include <stdint.h>
#include "radeon_track.h"
/* bo object */
#define RADEON_BO_FLAGS_MACRO_TILE 1
#define RADEON_BO_FLAGS_MICRO_TILE 2
struct radeon_bo_manager;
struct radeon_bo {
uint32_t alignment;
uint32_t handle;
uint32_t size;
uint32_t domains;
uint32_t flags;
unsigned cref;
#ifdef RADEON_BO_TRACK
struct radeon_track *track;
#endif
void *ptr;
struct radeon_bo_manager *bom;
};
/* bo functions */
struct radeon_bo_funcs {
struct radeon_bo *(*bo_open)(struct radeon_bo_manager *bom,
uint32_t handle,
uint32_t size,
uint32_t alignment,
uint32_t domains,
uint32_t flags);
void (*bo_ref)(struct radeon_bo *bo);
struct radeon_bo *(*bo_unref)(struct radeon_bo *bo);
int (*bo_map)(struct radeon_bo *bo, int write);
int (*bo_unmap)(struct radeon_bo *bo);
};
struct radeon_bo_manager {
struct radeon_bo_funcs *funcs;
int fd;
struct radeon_tracker tracker;
};
static inline void _radeon_bo_debug(struct radeon_bo *bo,
const char *op,
const char *file,
const char *func,
int line)
{
fprintf(stderr, "%s %p 0x%08X 0x%08X 0x%08X [%s %s %d]\n",
op, bo, bo->handle, bo->size, bo->cref, file, func, line);
}
static inline struct radeon_bo *_radeon_bo_open(struct radeon_bo_manager *bom,
uint32_t handle,
uint32_t size,
uint32_t alignment,
uint32_t domains,
uint32_t flags,
const char *file,
const char *func,
int line)
{
struct radeon_bo *bo;
bo = bom->funcs->bo_open(bom, handle, size, alignment, domains, flags);
#ifdef RADEON_BO_TRACK
if (bo) {
bo->track = radeon_tracker_add_track(&bom->tracker, bo->handle);
radeon_track_add_event(bo->track, file, func, "open", line);
}
#endif
return bo;
}
static inline void _radeon_bo_ref(struct radeon_bo *bo,
const char *file,
const char *func,
int line)
{
bo->cref++;
#ifdef RADEON_BO_TRACK
radeon_track_add_event(bo->track, file, func, "ref", line);
#endif
bo->bom->funcs->bo_ref(bo);
}
static inline struct radeon_bo *_radeon_bo_unref(struct radeon_bo *bo,
const char *file,
const char *func,
int line)
{
bo->cref--;
#ifdef RADEON_BO_TRACK
radeon_track_add_event(bo->track, file, func, "unref", line);
if (bo->cref <= 0) {
radeon_tracker_remove_track(&bo->bom->tracker, bo->track);
bo->track = NULL;
}
#endif
return bo->bom->funcs->bo_unref(bo);
}
static inline int _radeon_bo_map(struct radeon_bo *bo,
int write,
const char *file,
const char *func,
int line)
{
return bo->bom->funcs->bo_map(bo, write);
}
static inline int _radeon_bo_unmap(struct radeon_bo *bo,
const char *file,
const char *func,
int line)
{
return bo->bom->funcs->bo_unmap(bo);
}
#define radeon_bo_open(bom, h, s, a, d, f)\
_radeon_bo_open(bom, h, s, a, d, f, __FILE__, __FUNCTION__, __LINE__)
#define radeon_bo_ref(bo)\
_radeon_bo_ref(bo, __FILE__, __FUNCTION__, __LINE__)
#define radeon_bo_unref(bo)\
_radeon_bo_unref(bo, __FILE__, __FUNCTION__, __LINE__)
#define radeon_bo_map(bo, w)\
_radeon_bo_map(bo, w, __FILE__, __FUNCTION__, __LINE__)
#define radeon_bo_unmap(bo)\
_radeon_bo_unmap(bo, __FILE__, __FUNCTION__, __LINE__)
#define radeon_bo_debug(bo, opcode)\
_radeon_bo_debug(bo, opcode, __FILE__, __FUNCTION__, __LINE__)
#endif

libdrm/radeon/radeon_bo_gem.c (new file, 208 lines)

@ -0,0 +1,208 @@
/*
* Copyright © 2008 Dave Airlie
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Dave Airlie
* Jérôme Glisse <glisse@freedesktop.org>
*/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_bo.h"
#include "radeon_bo_gem.h"
struct radeon_bo_gem {
struct radeon_bo base;
uint32_t name;
int map_count;
};
struct bo_manager_gem {
struct radeon_bo_manager base;
};
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
uint32_t handle,
uint32_t size,
uint32_t alignment,
uint32_t domains,
uint32_t flags)
{
struct radeon_bo_gem *bo;
int r;
bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
if (bo == NULL) {
return NULL;
}
bo->base.bom = bom;
bo->base.handle = 0;
bo->base.size = size;
bo->base.alignment = alignment;
bo->base.domains = domains;
bo->base.flags = flags;
bo->base.ptr = NULL;
bo->map_count = 0;
if (handle) {
struct drm_gem_open open_arg;
memset(&open_arg, 0, sizeof(open_arg));
open_arg.name = handle;
r = ioctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (r != 0) {
free(bo);
return NULL;
}
bo->base.handle = open_arg.handle;
bo->base.size = open_arg.size;
bo->name = handle;
} else {
struct drm_radeon_gem_create args;
args.size = size;
args.alignment = alignment;
args.initial_domain = bo->base.domains;
args.no_backing_store = 0;
args.handle = 0;
r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
&args, sizeof(args));
bo->base.handle = args.handle;
if (r) {
fprintf(stderr, "Failed to allocate :\n");
fprintf(stderr, " size : %d bytes\n", size);
fprintf(stderr, " alignment : %d bytes\n", alignment);
fprintf(stderr, " domains : %d\n", bo->base.domains);
free(bo);
return NULL;
}
}
radeon_bo_ref((struct radeon_bo*)bo);
return (struct radeon_bo*)bo;
}
static void bo_ref(struct radeon_bo *bo)
{
}
static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
struct drm_gem_close args;
if (bo == NULL) {
return NULL;
}
if (bo->cref) {
return bo;
}
if (bo_gem->map_count) {
munmap(bo->ptr, bo->size);
}
/* close object */
args.handle = bo->handle;
ioctl(bo->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
free(bo_gem);
return NULL;
}
static int bo_map(struct radeon_bo *bo, int write)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
struct drm_radeon_gem_mmap args;
int r;
if (bo_gem->map_count++ != 0) {
return 0;
}
bo->ptr = NULL;
args.handle = bo->handle;
args.offset = 0;
args.size = (uint64_t)bo->size;
r = drmCommandWriteRead(bo->bom->fd,
DRM_RADEON_GEM_MMAP,
&args,
sizeof(args));
if (!r) {
bo->ptr = (void *)(unsigned long)args.addr_ptr;
} else {
fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
bo, bo->handle, r);
}
return r;
}
static int bo_unmap(struct radeon_bo *bo)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
if (--bo_gem->map_count > 0) {
return 0;
}
munmap(bo->ptr, bo->size);
bo->ptr = NULL;
return 0;
}
static struct radeon_bo_funcs bo_gem_funcs = {
bo_open,
bo_ref,
bo_unref,
bo_map,
bo_unmap
};
struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
{
struct bo_manager_gem *bomg;
bomg = (struct bo_manager_gem*)calloc(1, sizeof(struct bo_manager_gem));
if (bomg == NULL) {
return NULL;
}
bomg->base.funcs = &bo_gem_funcs;
bomg->base.fd = fd;
return (struct radeon_bo_manager*)bomg;
}
void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
{
struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;
if (bom == NULL) {
return;
}
free(bomg);
}

libdrm/radeon/radeon_bo_gem.h (new file, 40 lines)

@ -0,0 +1,40 @@
/*
* Copyright © 2008 Dave Airlie
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Dave Airlie
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_BO_GEM_H
#define RADEON_BO_GEM_H
#include "radeon_bo.h"
struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd);
void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom);
#endif
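
The two headers above make up the buffer-object API this branch introduces: radeon_bo_manager_gem_ctor() builds a manager for a DRM file descriptor, and the radeon_bo_open/map/unmap/unref macros dispatch through its radeon_bo_funcs table. The sketch below is illustrative only and not part of the commit; it assumes RADEON_GEM_DOMAIN_GTT from radeon_drm.h and an already-open radeon DRM fd, with error handling trimmed.

/* Sketch: allocate a 64 KiB GEM buffer object, clear it, release it. */
#include <string.h>
#include "radeon_drm.h"     /* assumed to provide RADEON_GEM_DOMAIN_GTT */
#include "radeon_bo.h"
#include "radeon_bo_gem.h"

static int clear_scratch_bo(int fd)
{
    struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(fd);
    struct radeon_bo *bo;

    if (bom == NULL)
        return -1;

    /* handle == 0 allocates a new object; a non-zero handle opens an
     * existing object by its GEM name (the DRM_IOCTL_GEM_OPEN path above). */
    bo = radeon_bo_open(bom, 0, 64 * 1024, 4096, RADEON_GEM_DOMAIN_GTT, 0);
    if (bo == NULL) {
        radeon_bo_manager_gem_dtor(bom);
        return -1;
    }

    if (radeon_bo_map(bo, 1) == 0) {        /* 1 = map for writing */
        memset(bo->ptr, 0, bo->size);
        radeon_bo_unmap(bo);
    }

    radeon_bo_unref(bo);                    /* drops the creation reference */
    radeon_bo_manager_gem_dtor(bom);
    return 0;
}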

libdrm/radeon/radeon_cs.h (new file, 168 lines)

@ -0,0 +1,168 @@
/*
* Copyright © 2008 Nicolai Haehnle
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_CS_H
#define RADEON_CS_H
#include <stdint.h>
#include "radeon_bo.h"
struct radeon_cs_reloc {
struct radeon_bo *bo;
uint32_t start_offset;
uint32_t end_offset;
uint32_t read_domain;
uint32_t write_domain;
uint32_t flags;
};
struct radeon_cs_manager;
struct radeon_cs {
struct radeon_cs_manager *csm;
void *relocs;
uint32_t *packets;
unsigned crelocs;
unsigned relocs_total_size;
unsigned cdw;
unsigned ndw;
int section;
unsigned section_ndw;
unsigned section_cdw;
const char *section_file;
const char *section_func;
int section_line;
};
/* cs functions */
struct radeon_cs_funcs {
struct radeon_cs *(*cs_create)(struct radeon_cs_manager *csm,
uint32_t ndw);
int (*cs_write_dword)(struct radeon_cs *cs, uint32_t dword);
int (*cs_write_reloc)(struct radeon_cs *cs,
struct radeon_bo *bo,
uint32_t start_offset,
uint32_t end_offset,
uint32_t read_domain,
uint32_t write_domain,
uint32_t flags);
int (*cs_begin)(struct radeon_cs *cs,
uint32_t ndw,
const char *file,
const char *func,
int line);
int (*cs_end)(struct radeon_cs *cs,
const char *file,
const char *func,
int line);
int (*cs_emit)(struct radeon_cs *cs);
int (*cs_destroy)(struct radeon_cs *cs);
int (*cs_erase)(struct radeon_cs *cs);
int (*cs_need_flush)(struct radeon_cs *cs);
void (*cs_print)(struct radeon_cs *cs, FILE *file);
};
struct radeon_cs_manager {
struct radeon_cs_funcs *funcs;
int fd;
};
static inline struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
uint32_t ndw)
{
return csm->funcs->cs_create(csm, ndw);
}
static inline int radeon_cs_write_dword(struct radeon_cs *cs, uint32_t dword)
{
return cs->csm->funcs->cs_write_dword(cs, dword);
}
static inline int radeon_cs_write_reloc(struct radeon_cs *cs,
struct radeon_bo *bo,
uint32_t start_offset,
uint32_t end_offset,
uint32_t read_domain,
uint32_t write_domain,
uint32_t flags)
{
return cs->csm->funcs->cs_write_reloc(cs,
bo,
start_offset,
end_offset,
read_domain,
write_domain,
flags);
}
static inline int radeon_cs_begin(struct radeon_cs *cs,
uint32_t ndw,
const char *file,
const char *func,
int line)
{
return cs->csm->funcs->cs_begin(cs, ndw, file, func, line);
}
static inline int radeon_cs_end(struct radeon_cs *cs,
const char *file,
const char *func,
int line)
{
return cs->csm->funcs->cs_end(cs, file, func, line);
}
static inline int radeon_cs_emit(struct radeon_cs *cs)
{
return cs->csm->funcs->cs_emit(cs);
}
static inline int radeon_cs_destroy(struct radeon_cs *cs)
{
return cs->csm->funcs->cs_destroy(cs);
}
static inline int radeon_cs_erase(struct radeon_cs *cs)
{
return cs->csm->funcs->cs_erase(cs);
}
static inline int radeon_cs_need_flush(struct radeon_cs *cs)
{
return cs->csm->funcs->cs_need_flush(cs);
}
static inline void radeon_cs_print(struct radeon_cs *cs, FILE *file)
{
cs->csm->funcs->cs_print(cs, file);
}
#endif

libdrm/radeon/radeon_cs_gem.c (new file, 438 lines)

@ -0,0 +1,438 @@
/*
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
#include <errno.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "radeon_cs.h"
#include "radeon_cs_gem.h"
#include "radeon_bo_gem.h"
#include "drm.h"
#include "xf86drm.h"
#include "radeon_drm.h"
#pragma pack(1)
struct cs_reloc_gem {
uint32_t handle;
uint32_t start_offset;
uint32_t end_offset;
uint32_t read_domain;
uint32_t write_domain;
uint32_t flags;
};
#pragma pack()
struct cs_gem {
struct radeon_cs base;
struct drm_radeon_cs2 cs;
struct drm_radeon_cs_chunk chunks[2];
unsigned nrelocs;
uint32_t *relocs;
struct radeon_bo **relocs_bo;
};
static struct radeon_cs *cs_gem_create(struct radeon_cs_manager *csm,
uint32_t ndw)
{
struct cs_gem *csg;
/* max cmd buffer size is 64Kb */
if (ndw > (64 * 1024 / 4)) {
return NULL;
}
csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
if (csg == NULL) {
return NULL;
}
csg->base.csm = csm;
csg->base.ndw = 64 * 1024 / 4;
csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
if (csg->base.packets == NULL) {
free(csg);
return NULL;
}
csg->base.relocs_total_size = 0;
csg->base.crelocs = 0;
csg->nrelocs = 4096 / (4 * 4) ;
csg->relocs_bo = (struct radeon_bo**)calloc(1,
csg->nrelocs*sizeof(void*));
if (csg->relocs_bo == NULL) {
free(csg->base.packets);
free(csg);
return NULL;
}
csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
if (csg->relocs == NULL) {
free(csg->relocs_bo);
free(csg->base.packets);
free(csg);
return NULL;
}
csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
csg->chunks[0].length_dw = 0;
csg->chunks[0].chunk_data = (uint64_t)(intptr_t)csg->base.packets;
csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
csg->chunks[1].length_dw = 0;
csg->chunks[1].chunk_data = (uint64_t)(intptr_t)csg->relocs;
return (struct radeon_cs*)csg;
}
static int cs_gem_write_dword(struct radeon_cs *cs, uint32_t dword)
{
struct cs_gem *csg = (struct cs_gem*)cs;
if (cs->cdw >= cs->ndw) {
uint32_t tmp, *ptr;
tmp = (cs->cdw + 1 + 0x3FF) & (~0x3FF);
ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
if (ptr == NULL) {
return -ENOMEM;
}
cs->packets = ptr;
cs->ndw = tmp;
csg->chunks[0].chunk_data = (uint64_t)(intptr_t)csg->base.packets;
}
cs->packets[cs->cdw++] = dword;
csg->chunks[0].length_dw += 1;
return 0;
}
static int cs_gem_write_reloc(struct radeon_cs *cs,
struct radeon_bo *bo,
uint32_t start_offset,
uint32_t end_offset,
uint32_t read_domain,
uint32_t write_domain,
uint32_t flags)
{
struct cs_gem *csg = (struct cs_gem*)cs;
struct cs_reloc_gem *reloc;
uint32_t idx;
unsigned i;
/* check domains */
if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
/* in one CS a bo can only be in read or write domain but not
* in read & write domain at the same time
*/
return -EINVAL;
}
if (read_domain == RADEON_GEM_DOMAIN_CPU) {
return -EINVAL;
}
if (write_domain == RADEON_GEM_DOMAIN_CPU) {
return -EINVAL;
}
/* check reloc window */
if (end_offset > bo->size) {
return -EINVAL;
}
if (start_offset > end_offset) {
return -EINVAL;
}
/* check if bo is already referenced */
for(i = 0; i < cs->crelocs; i++) {
idx = i * 6;
reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
if (reloc->handle == bo->handle) {
/* A bo must be referenced as either read or write. We already
* checked above that exactly one of the read/write domains is
* set, so here we only verify that if the previous reloc used
* the read (or write) domain, this one does too.
*/
if (reloc->read_domain && !read_domain) {
return -EINVAL;
}
if (reloc->write_domain && !write_domain) {
return -EINVAL;
}
reloc->read_domain |= read_domain;
reloc->write_domain |= write_domain;
/* update start and end offset */
if (start_offset < reloc->start_offset) {
reloc->start_offset = start_offset;
}
if (end_offset > reloc->end_offset) {
reloc->end_offset = end_offset;
}
/* update flags */
reloc->flags |= (flags & reloc->flags);
/* write relocation packet */
cs_gem_write_dword(cs, 0xc0001000);
cs_gem_write_dword(cs, idx);
return 0;
}
}
/* new relocation */
if (csg->base.crelocs >= csg->nrelocs) {
/* allocate more memory (TODO: should use a slab allocator maybe) */
uint32_t *tmp, size;
size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
tmp = (uint32_t*)realloc(csg->relocs_bo, size);
if (tmp == NULL) {
return -ENOMEM;
}
csg->relocs_bo = (struct radeon_bo**)tmp;
size = ((csg->nrelocs + 1) * 6 * 4);
tmp = (uint32_t*)realloc(csg->relocs, size);
if (tmp == NULL) {
return -ENOMEM;
}
cs->relocs = csg->relocs = tmp;
csg->nrelocs += 1;
csg->chunks[1].chunk_data = (uint64_t)(intptr_t)csg->relocs;
}
csg->relocs_bo[csg->base.crelocs] = bo;
idx = (csg->base.crelocs++) * 6;
reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
reloc->handle = bo->handle;
reloc->start_offset = start_offset;
reloc->end_offset = end_offset;
reloc->read_domain = read_domain;
reloc->write_domain = write_domain;
reloc->flags = flags;
csg->chunks[1].length_dw += 6;
radeon_bo_ref(bo);
cs->relocs_total_size += bo->size;
cs_gem_write_dword(cs, 0xc0001000);
cs_gem_write_dword(cs, idx);
return 0;
}
static int cs_gem_begin(struct radeon_cs *cs,
uint32_t ndw,
const char *file,
const char *func,
int line)
{
return 0;
}
static int cs_gem_end(struct radeon_cs *cs,
const char *file,
const char *func,
int line)
{
cs->section = 0;
return 0;
}
static int cs_gem_emit(struct radeon_cs *cs)
{
struct cs_gem *csg = (struct cs_gem*)cs;
uint64_t chunk_array[2];
unsigned i;
int r;
csg->chunks[0].length_dw = cs->cdw;
chunk_array[0] = (uint64_t)(intptr_t)&csg->chunks[0];
chunk_array[1] = (uint64_t)(intptr_t)&csg->chunks[1];
csg->cs.num_chunks = 2;
csg->cs.chunks = (uint64_t)(intptr_t)chunk_array;
r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS2,
&csg->cs, sizeof(struct drm_radeon_cs2));
for (i = 0; i < csg->base.crelocs; i++) {
radeon_bo_unref(csg->relocs_bo[i]);
csg->relocs_bo[i] = NULL;
}
return r;
}
static int cs_gem_destroy(struct radeon_cs *cs)
{
struct cs_gem *csg = (struct cs_gem*)cs;
free(csg->relocs_bo);
free(cs->relocs);
free(cs->packets);
free(cs);
return 0;
}
static int cs_gem_erase(struct radeon_cs *cs)
{
struct cs_gem *csg = (struct cs_gem*)cs;
unsigned i;
if (csg->relocs_bo) {
for (i = 0; i < csg->base.crelocs; i++) {
if (csg->relocs_bo[i]) {
radeon_bo_unref(csg->relocs_bo[i]);
csg->relocs_bo[i] = NULL;
}
}
}
cs->relocs_total_size = 0;
cs->cdw = 0;
cs->section = 0;
cs->crelocs = 0;
csg->chunks[0].length_dw = 0;
csg->chunks[1].length_dw = 0;
return 0;
}
static int cs_gem_need_flush(struct radeon_cs *cs)
{
return (cs->relocs_total_size > (32*1024*1024));
}
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3
#define PACKET3_NOP 0x10
#define PACKET3_SET_SCISSORS 0x1E
#define PACKET3_3D_DRAW_VBUF 0x28
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
static void cs_gem_print(struct radeon_cs *cs, FILE *file)
{
unsigned opcode;
unsigned reg;
unsigned cnt;
int i, j;
for (i = 0; i < cs->cdw;) {
cnt = CP_PACKET_GET_COUNT(cs->packets[i]);
switch (CP_PACKET_GET_TYPE(cs->packets[i])) {
case PACKET_TYPE0:
fprintf(file, "Pkt0 at %d (%d dwords):\n", i, cnt + 1);
reg = CP_PACKET0_GET_REG(cs->packets[i]);
if (CP_PACKET0_GET_ONE_REG_WR(cs->packets[i++])) {
for (j = 0; j <= cnt; j++) {
fprintf(file, " 0x%08X -> 0x%04X\n",
cs->packets[i++], reg);
}
} else {
for (j = 0; j <= cnt; j++) {
fprintf(file, " 0x%08X -> 0x%04X\n",
cs->packets[i++], reg);
reg += 4;
}
}
break;
case PACKET_TYPE3:
fprintf(file, "Pkt3 at %d :\n", i);
opcode = CP_PACKET3_GET_OPCODE(cs->packets[i++]);
switch (opcode) {
case PACKET3_NOP:
fprintf(file, " PACKET3_NOP:\n");
break;
case PACKET3_3D_DRAW_VBUF:
fprintf(file, " PACKET3_3D_DRAW_VBUF:\n");
break;
case PACKET3_3D_DRAW_IMMD:
fprintf(file, " PACKET3_3D_DRAW_IMMD:\n");
break;
case PACKET3_3D_DRAW_INDX:
fprintf(file, " PACKET3_3D_DRAW_INDX:\n");
break;
case PACKET3_3D_LOAD_VBPNTR:
fprintf(file, " PACKET3_3D_LOAD_VBPNTR:\n");
break;
case PACKET3_INDX_BUFFER:
fprintf(file, " PACKET3_INDX_BUFFER:\n");
break;
case PACKET3_3D_DRAW_VBUF_2:
fprintf(file, " PACKET3_3D_DRAW_VBUF_2:\n");
break;
case PACKET3_3D_DRAW_IMMD_2:
fprintf(file, " PACKET3_3D_DRAW_IMMD_2:\n");
break;
case PACKET3_3D_DRAW_INDX_2:
fprintf(file, " PACKET3_3D_DRAW_INDX_2:\n");
break;
default:
fprintf(file, "Unknow opcode 0x%02X at %d\n", opcode, i);
return;
}
for (j = 0; j <= cnt; j++) {
fprintf(file, " 0x%08X\n", cs->packets[i++]);
}
break;
case PACKET_TYPE1:
case PACKET_TYPE2:
default:
fprintf(file, "Unknow packet 0x%08X at %d\n", cs->packets[i], i);
return;
}
}
}
static struct radeon_cs_funcs radeon_cs_gem_funcs = {
cs_gem_create,
cs_gem_write_dword,
cs_gem_write_reloc,
cs_gem_begin,
cs_gem_end,
cs_gem_emit,
cs_gem_destroy,
cs_gem_erase,
cs_gem_need_flush,
cs_gem_print
};
struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
{
struct radeon_cs_manager *csm;
csm = (struct radeon_cs_manager*)calloc(1,
sizeof(struct radeon_cs_manager));
if (csm == NULL) {
return NULL;
}
csm->funcs = &radeon_cs_gem_funcs;
csm->fd = fd;
return csm;
}
void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
{
free(csm);
}

libdrm/radeon/radeon_cs_gem.h (new file, 41 lines)

@ -0,0 +1,41 @@
/*
* Copyright © 2008 Nicolai Haehnle
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_CS_GEM_H
#define RADEON_CS_GEM_H
#include "radeon_cs.h"
struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd);
void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm);
#endif
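
The command-stream side mirrors the bo API: radeon_cs_manager_gem_ctor() gives a manager whose radeon_cs_funcs submit through DRM_RADEON_CS2, and each relocation must name exactly one of a read or a write domain (the CPU domain is rejected). A hedged sketch, not part of the commit, follows; the dword written is a placeholder rather than a real packet, "bo" is a buffer obtained through the radeon_bo API above, and RADEON_GEM_DOMAIN_GTT is again assumed from radeon_drm.h.

/* Sketch: build and submit a tiny (placeholder) command stream. */
#include "radeon_drm.h"     /* assumed to provide RADEON_GEM_DOMAIN_GTT */
#include "radeon_cs.h"
#include "radeon_cs_gem.h"

static int emit_placeholder_ib(int fd, struct radeon_bo *bo)
{
    struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(fd);
    struct radeon_cs *cs;
    int r;

    if (csm == NULL)
        return -1;

    cs = radeon_cs_create(csm, 1024);              /* request 1024 dwords */
    if (cs == NULL) {
        radeon_cs_manager_gem_dtor(csm);
        return -1;
    }

    radeon_cs_begin(cs, 3, __FILE__, __FUNCTION__, __LINE__);
    radeon_cs_write_dword(cs, 0x00000000);         /* placeholder dword */
    /* Read-only reloc covering the whole bo; write_domain stays 0. */
    radeon_cs_write_reloc(cs, bo, 0, bo->size, RADEON_GEM_DOMAIN_GTT, 0, 0);
    radeon_cs_end(cs, __FILE__, __FUNCTION__, __LINE__);

    r = radeon_cs_emit(cs);                        /* DRM_RADEON_CS2 ioctl */
    radeon_cs_erase(cs);                           /* ready for reuse */
    radeon_cs_destroy(cs);
    radeon_cs_manager_gem_dtor(csm);
    return r;
}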

libdrm/radeon/radeon_track.c (new file, 140 lines)

@ -0,0 +1,140 @@
/*
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <glisse@freedesktop.org>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "radeon_track.h"
void radeon_track_add_event(struct radeon_track *track,
const char *file,
const char *func,
const char *op,
unsigned line)
{
struct radeon_track_event *event;
if (track == NULL) {
return;
}
event = (void*)calloc(1,sizeof(struct radeon_track_event));
if (event == NULL) {
return;
}
event->line = line;
event->file = strdup(file);
event->func = strdup(func);
event->op = strdup(op);
if (event->file == NULL || event->func == NULL || event->op == NULL) {
free(event->file);
free(event->func);
free(event->op);
free(event);
return;
}
event->next = track->events;
track->events = event;
}
struct radeon_track *radeon_tracker_add_track(struct radeon_tracker *tracker,
unsigned key)
{
struct radeon_track *track;
track = (struct radeon_track*)calloc(1, sizeof(struct radeon_track));
if (track) {
track->next = tracker->tracks.next;
track->prev = &tracker->tracks;
tracker->tracks.next = track;
if (track->next) {
track->next->prev = track;
}
track->key = key;
track->events = NULL;
}
return track;
}
void radeon_tracker_remove_track(struct radeon_tracker *tracker,
struct radeon_track *track)
{
struct radeon_track_event *event;
void *tmp;
if (track == NULL) {
return;
}
track->prev->next = track->next;
if (track->next) {
track->next->prev = track->prev;
}
track->next = track->prev = NULL;
event = track->events;
while (event) {
tmp = event;
free(event->file);
free(event->func);
free(event->op);
event = event->next;
free(tmp);
}
track->events = NULL;
free(track);
}
void radeon_tracker_print(struct radeon_tracker *tracker, FILE *file)
{
struct radeon_track *track;
struct radeon_track_event *event;
void *tmp;
track = tracker->tracks.next;
while (track) {
event = track->events;
fprintf(file, "[0x%08X] :\n", track->key);
while (event) {
tmp = event;
fprintf(file, " [0x%08X:%s](%s:%s:%d)\n",
track->key, event->op, event->file,
event->func, event->line);
free(event->file);
free(event->func);
free(event->op);
event->file = NULL;
event->func = NULL;
event->op = NULL;
event = event->next;
free(tmp);
}
track->events = NULL;
tmp = track;
track = track->next;
free(tmp);
}
}

libdrm/radeon/radeon_track.h (new file, 64 lines)

@ -0,0 +1,64 @@
/*
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_TRACK_H
#define RADEON_TRACK_H
struct radeon_track_event {
struct radeon_track_event *next;
char *file;
char *func;
char *op;
unsigned line;
};
struct radeon_track {
struct radeon_track *next;
struct radeon_track *prev;
unsigned key;
struct radeon_track_event *events;
};
struct radeon_tracker {
struct radeon_track tracks;
};
void radeon_track_add_event(struct radeon_track *track,
const char *file,
const char *func,
const char *op,
unsigned line);
struct radeon_track *radeon_tracker_add_track(struct radeon_tracker *tracker,
unsigned key);
void radeon_tracker_remove_track(struct radeon_tracker *tracker,
struct radeon_track *track);
void radeon_tracker_print(struct radeon_tracker *tracker,
FILE *file);
#endif
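
radeon_track.c/radeon_track.h implement the optional leak tracker that the radeon_bo_open/ref/unref macros hook into when libdrm_radeon is built with RADEON_BO_TRACK defined: every event is recorded with file, function and line against the bo handle. A short sketch (illustrative only, assuming such a build) of dumping whatever is still tracked at teardown:

/* Sketch: report buffer objects that were never released. */
#include <stdio.h>
#include "radeon_bo.h"
#include "radeon_bo_gem.h"

static void report_bo_leaks(struct radeon_bo_manager *bom)
{
    /* Prints every remaining track with its recorded open/ref/unref
     * events; note that radeon_tracker_print() also frees the tracks
     * as it walks them, so call it once at teardown. */
    radeon_tracker_print(&bom->tracker, stderr);
}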

libdrm/xf86drm.c

@ -87,6 +87,9 @@
#define DRM_MSG_VERBOSITY 3
#define DRM_NODE_CONTROL 0
#define DRM_NODE_RENDER 1
static drmServerInfoPtr drm_server_info;
void drmSetServerInfo(drmServerInfoPtr info)
@ -277,7 +280,7 @@ static int drmMatchBusID(const char *id1, const char *id2)
* special file node with the major and minor numbers specified by \p dev and
* parent directory if necessary and was called by root.
*/
-static int drmOpenDevice(long dev, int minor)
+static int drmOpenDevice(long dev, int minor, int type)
{
stat_t st;
char buf[64];
@ -287,7 +290,7 @@ static int drmOpenDevice(long dev, int minor)
uid_t user = DRM_DEV_UID;
gid_t group = DRM_DEV_GID, serv_group;
-sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
+sprintf(buf, type ? DRM_DEV_NAME : DRM_CONTROL_DEV_NAME, DRM_DIR_NAME, minor);
drmMsg("drmOpenDevice: node name is %s\n", buf);
if (drm_server_info) {
@ -386,15 +389,15 @@ wait_for_udev:
* Calls drmOpenDevice() if \p create is set, otherwise assembles the device
* name from \p minor and opens it.
*/
-static int drmOpenMinor(int minor, int create)
+static int drmOpenMinor(int minor, int create, int type)
{
int fd;
char buf[64];
if (create)
-return drmOpenDevice(makedev(DRM_MAJOR, minor), minor);
+return drmOpenDevice(makedev(DRM_MAJOR, minor), minor, type);
-sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
+sprintf(buf, type ? DRM_DEV_NAME : DRM_CONTROL_DEV_NAME, DRM_DIR_NAME, minor);
if ((fd = open(buf, O_RDWR, 0)) >= 0)
return fd;
return -errno;
@ -417,7 +420,7 @@ int drmAvailable(void)
int retval = 0;
int fd;
-if ((fd = drmOpenMinor(0, 1)) < 0) {
+if ((fd = drmOpenMinor(0, 1, DRM_NODE_RENDER)) < 0) {
#ifdef __linux__
/* Try proc for backward Linux compatibility */
if (!access("/proc/dri/0", R_OK))
@ -458,7 +461,7 @@ static int drmOpenByBusid(const char *busid)
drmMsg("drmOpenByBusid: Searching for BusID %s\n", busid);
for (i = 0; i < DRM_MAX_MINOR; i++) {
-fd = drmOpenMinor(i, 1);
+fd = drmOpenMinor(i, 1, DRM_NODE_RENDER);
drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd);
if (fd >= 0) {
sv.drm_di_major = 1;
@ -520,7 +523,7 @@ static int drmOpenByName(const char *name)
* already in use. If it's in use it will have a busid assigned already.
*/
for (i = 0; i < DRM_MAX_MINOR; i++) {
-if ((fd = drmOpenMinor(i, 1)) >= 0) {
+if ((fd = drmOpenMinor(i, 1, DRM_NODE_RENDER)) >= 0) {
if ((version = drmGetVersion(fd))) {
if (!strcmp(version->name, name)) {
drmFreeVersion(version);
@ -564,7 +567,7 @@ static int drmOpenByName(const char *name)
if (*pt) { /* Found busid */
return drmOpenByBusid(++pt);
} else { /* No busid */
-return drmOpenDevice(strtol(devstring, NULL, 0),i);
+return drmOpenDevice(strtol(devstring, NULL, 0),i, DRM_NODE_RENDER);
}
}
}
@ -614,6 +617,10 @@ int drmOpen(const char *name, const char *busid)
return -1;
}
int drmOpenControl(int minor)
{
return drmOpenMinor(minor, 0, DRM_NODE_CONTROL);
}
/**
* Free the version information returned by drmGetVersion().
@ -2434,3 +2441,20 @@ void drmCloseOnce(int fd)
}
}
}
int drmSetMaster(int fd)
{
int ret;
fprintf(stderr,"Setting master \n");
ret = ioctl(fd, DRM_IOCTL_SET_MASTER, 0);
return ret;
}
int drmDropMaster(int fd)
{
int ret;
fprintf(stderr,"Dropping master \n");
ret = ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
return ret;
}
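
The xf86drm.c hunks above thread a node type through drmOpenDevice()/drmOpenMinor() and add drmOpenControl(), drmSetMaster() and drmDropMaster(). A minimal sketch of exercising them follows; it is not part of the commit, control minor 0 is an assumption, and error handling is trimmed.

/* Sketch: open a control node and toggle DRM master on it. */
#include "xf86drm.h"

static int probe_control_node(void)
{
    int fd, ret;

    fd = drmOpenControl(0);       /* opens DRM_CONTROL_DEV_NAME, e.g. /dev/dri/controlD0 */
    if (fd < 0)
        return fd;                /* -errno from drmOpenMinor() */

    ret = drmSetMaster(fd);       /* DRM_IOCTL_SET_MASTER */
    if (ret == 0)
        drmDropMaster(fd);        /* DRM_IOCTL_DROP_MASTER */

    drmClose(fd);
    return ret;
}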

libdrm/xf86drm.h

@ -49,6 +49,7 @@
#define DRM_DIR_NAME "/dev/dri"
#define DRM_DEV_NAME "%s/card%d"
#define DRM_CONTROL_DEV_NAME "%s/controlD%d"
#define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
#define DRM_ERR_NO_DEVICE (-1001)
@ -508,6 +509,7 @@ do { register unsigned int __old __asm("o0"); \
/* General user-level programmer's API: unprivileged */
extern int drmAvailable(void);
extern int drmOpen(const char *name, const char *busid);
extern int drmOpenControl(int minor);
extern int drmClose(int fd);
extern drmVersionPtr drmGetVersion(int fd);
extern drmVersionPtr drmGetLibVersion(int fd);
@ -659,4 +661,7 @@ extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
extern void drmMsg(const char *format, ...);
extern int drmSetMaster(int fd);
extern int drmDropMaster(int fd);
#endif

libdrm/xf86drmMode.c (new file, 684 lines)

@ -0,0 +1,684 @@
/*
* \file xf86drmMode.c
* Implementation of the DRM modesetting interface.
*
* \author Jakob Bornecrantz <wallbraker@gmail.com>
*
* \par Acknowledgements:
* Feb 2007, Dave Airlie <airlied@linux.ie>
*/
/*
* Copyright (c) <year> <copyright holders>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
/*
* TODO: the types we are after are defined in different headers on different
* platforms; find which headers to include to get uint32_t
*/
#include <stdint.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include "xf86drmMode.h"
#include "xf86drm.h"
#include <drm.h>
#include <string.h>
#include <dirent.h>
#include <errno.h>
#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
/*
* Util functions
*/
void* drmAllocCpy(void *array, int count, int entry_size)
{
char *r;
int i;
if (!count || !array || !entry_size)
return 0;
if (!(r = drmMalloc(count*entry_size)))
return 0;
for (i = 0; i < count; i++)
memcpy(r+(entry_size*i), array+(entry_size*i), entry_size);
return r;
}
/*
* A couple of free functions.
*/
void drmModeFreeModeInfo(struct drm_mode_modeinfo *ptr)
{
if (!ptr)
return;
drmFree(ptr);
}
void drmModeFreeResources(drmModeResPtr ptr)
{
if (!ptr)
return;
drmFree(ptr);
}
void drmModeFreeFB(drmModeFBPtr ptr)
{
if (!ptr)
return;
/* we might add more frees later. */
drmFree(ptr);
}
void drmModeFreeCrtc(drmModeCrtcPtr ptr)
{
if (!ptr)
return;
drmFree(ptr);
}
void drmModeFreeConnector(drmModeConnectorPtr ptr)
{
if (!ptr)
return;
drmFree(ptr->modes);
drmFree(ptr);
}
void drmModeFreeEncoder(drmModeEncoderPtr ptr)
{
drmFree(ptr);
}
/*
* ModeSetting functions.
*/
drmModeResPtr drmModeGetResources(int fd)
{
struct drm_mode_card_res res;
drmModeResPtr r = 0;
memset(&res, 0, sizeof(struct drm_mode_card_res));
if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
return 0;
if (res.count_fbs)
res.fb_id_ptr = VOID2U64(drmMalloc(res.count_fbs*sizeof(uint32_t)));
if (res.count_crtcs)
res.crtc_id_ptr = VOID2U64(drmMalloc(res.count_crtcs*sizeof(uint32_t)));
if (res.count_connectors)
res.connector_id_ptr = VOID2U64(drmMalloc(res.count_connectors*sizeof(uint32_t)));
if (res.count_encoders)
res.encoder_id_ptr = VOID2U64(drmMalloc(res.count_encoders*sizeof(uint32_t)));
if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
r = NULL;
goto err_allocs;
}
/*
* return
*/
if (!(r = drmMalloc(sizeof(*r))))
return 0;
r->min_width = res.min_width;
r->max_width = res.max_width;
r->min_height = res.min_height;
r->max_height = res.max_height;
r->count_fbs = res.count_fbs;
r->count_crtcs = res.count_crtcs;
r->count_connectors = res.count_connectors;
r->count_encoders = res.count_encoders;
/* TODO: we really should test whether these allocs fail. */
r->fbs = drmAllocCpy(U642VOID(res.fb_id_ptr), res.count_fbs, sizeof(uint32_t));
r->crtcs = drmAllocCpy(U642VOID(res.crtc_id_ptr), res.count_crtcs, sizeof(uint32_t));
r->connectors = drmAllocCpy(U642VOID(res.connector_id_ptr), res.count_connectors, sizeof(uint32_t));
r->encoders = drmAllocCpy(U642VOID(res.encoder_id_ptr), res.count_encoders, sizeof(uint32_t));
err_allocs:
drmFree(U642VOID(res.fb_id_ptr));
drmFree(U642VOID(res.crtc_id_ptr));
drmFree(U642VOID(res.connector_id_ptr));
drmFree(U642VOID(res.encoder_id_ptr));
return r;
}
int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
uint32_t *buf_id)
{
struct drm_mode_fb_cmd f;
int ret;
f.width = width;
f.height = height;
f.pitch = pitch;
f.bpp = bpp;
f.depth = depth;
f.handle = bo_handle;
if ((ret = ioctl(fd, DRM_IOCTL_MODE_ADDFB, &f)))
return ret;
*buf_id = f.fb_id;
return 0;
}
int drmModeRmFB(int fd, uint32_t bufferId)
{
return ioctl(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
}
drmModeFBPtr drmModeGetFB(int fd, uint32_t buf)
{
struct drm_mode_fb_cmd info;
drmModeFBPtr r;
info.fb_id = buf;
if (ioctl(fd, DRM_IOCTL_MODE_GETFB, &info))
return NULL;
if (!(r = drmMalloc(sizeof(*r))))
return NULL;
r->fb_id = info.fb_id;
r->width = info.width;
r->height = info.height;
r->pitch = info.pitch;
r->bpp = info.bpp;
r->handle = info.handle;
r->depth = info.depth;
return r;
}
/*
* Crtc functions
*/
drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId)
{
struct drm_mode_crtc crtc;
drmModeCrtcPtr r;
crtc.crtc_id = crtcId;
if (ioctl(fd, DRM_IOCTL_MODE_GETCRTC, &crtc))
return 0;
/*
* return
*/
if (!(r = drmMalloc(sizeof(*r))))
return 0;
r->crtc_id = crtc.crtc_id;
r->x = crtc.x;
r->y = crtc.y;
r->mode_valid = crtc.mode_valid;
if (r->mode_valid)
memcpy(&r->mode, &crtc.mode, sizeof(struct drm_mode_modeinfo));
r->buffer_id = crtc.fb_id;
r->gamma_size = crtc.gamma_size;
return r;
}
int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
uint32_t x, uint32_t y, uint32_t *connectors, int count,
struct drm_mode_modeinfo *mode)
{
struct drm_mode_crtc crtc;
crtc.x = x;
crtc.y = y;
crtc.crtc_id = crtcId;
crtc.fb_id = bufferId;
crtc.set_connectors_ptr = VOID2U64(connectors);
crtc.count_connectors = count;
if (mode) {
memcpy(&crtc.mode, mode, sizeof(struct drm_mode_modeinfo));
crtc.mode_valid = 1;
} else
crtc.mode_valid = 0;
return ioctl(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
}
/*
* Cursor manipulation
*/
int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height)
{
struct drm_mode_cursor arg;
arg.flags = DRM_MODE_CURSOR_BO;
arg.crtc_id = crtcId;
arg.width = width;
arg.height = height;
arg.handle = bo_handle;
return ioctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}
int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
{
struct drm_mode_cursor arg;
arg.flags = DRM_MODE_CURSOR_MOVE;
arg.crtc_id = crtcId;
arg.x = x;
arg.y = y;
return ioctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}
/*
* Encoder get
*/
drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id)
{
struct drm_mode_get_encoder enc;
drmModeEncoderPtr r = NULL;
enc.encoder_id = encoder_id;
enc.encoder_type = 0;
enc.possible_crtcs = 0;
enc.possible_clones = 0;
if (ioctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc))
return 0;
if (!(r = drmMalloc(sizeof(*r))))
return 0;
r->encoder_id = enc.encoder_id;
r->crtc_id = enc.crtc_id;
r->encoder_type = enc.encoder_type;
r->possible_crtcs = enc.possible_crtcs;
r->possible_clones = enc.possible_clones;
return r;
}
/*
* Connector manipulation
*/
drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
{
struct drm_mode_get_connector conn;
drmModeConnectorPtr r = NULL;
conn.connector_id = connector_id;
conn.connector_type_id = 0;
conn.connector_type = 0;
conn.count_modes = 0;
conn.modes_ptr = 0;
conn.count_props = 0;
conn.props_ptr = 0;
conn.prop_values_ptr = 0;
conn.count_encoders = 0;
conn.encoders_ptr = 0;
if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
return 0;
if (conn.count_props) {
conn.props_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint32_t)));
conn.prop_values_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint64_t)));
}
if (conn.count_modes)
conn.modes_ptr = VOID2U64(drmMalloc(conn.count_modes*sizeof(struct drm_mode_modeinfo)));
if (conn.count_encoders)
conn.encoders_ptr = VOID2U64(drmMalloc(conn.count_encoders*sizeof(uint32_t)));
if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
goto err_allocs;
if(!(r = drmMalloc(sizeof(*r)))) {
goto err_allocs;
}
r->connector_id = conn.connector_id;
r->encoder_id = conn.encoder_id;
r->connection = conn.connection;
r->mmWidth = conn.mm_width;
r->mmHeight = conn.mm_height;
r->subpixel = conn.subpixel;
r->count_modes = conn.count_modes;
/* TODO: we should test whether these alloc & copy calls fail. */
r->count_props = conn.count_props;
r->props = drmAllocCpy(U642VOID(conn.props_ptr), conn.count_props, sizeof(uint32_t));
r->prop_values = drmAllocCpy(U642VOID(conn.prop_values_ptr), conn.count_props, sizeof(uint64_t));
r->modes = drmAllocCpy(U642VOID(conn.modes_ptr), conn.count_modes, sizeof(struct drm_mode_modeinfo));
r->count_encoders = conn.count_encoders;
r->encoders = drmAllocCpy(U642VOID(conn.encoders_ptr), conn.count_encoders, sizeof(uint32_t));
r->connector_type = conn.connector_type;
r->connector_type_id = conn.connector_type_id;
if (!r->props || !r->prop_values || !r->modes || !r->encoders)
goto err_allocs;
err_allocs:
drmFree(U642VOID(conn.prop_values_ptr));
drmFree(U642VOID(conn.props_ptr));
drmFree(U642VOID(conn.modes_ptr));
drmFree(U642VOID(conn.encoders_ptr));
return r;
}
int drmModeAttachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
{
struct drm_mode_mode_cmd res;
memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
res.connector_id = connector_id;
return ioctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
}
int drmModeDetachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
{
struct drm_mode_mode_cmd res;
memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
res.connector_id = connector_id;
return ioctl(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
}
drmModePropertyPtr drmModeGetProperty(int fd, uint32_t property_id)
{
struct drm_mode_get_property prop;
drmModePropertyPtr r;
prop.prop_id = property_id;
prop.count_enum_blobs = 0;
prop.count_values = 0;
prop.flags = 0;
prop.enum_blob_ptr = 0;
prop.values_ptr = 0;
if (ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
return 0;
if (prop.count_values)
prop.values_ptr = VOID2U64(drmMalloc(prop.count_values * sizeof(uint64_t)));
if (prop.count_enum_blobs && (prop.flags & DRM_MODE_PROP_ENUM))
prop.enum_blob_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(struct drm_mode_property_enum)));
if (prop.count_enum_blobs && (prop.flags & DRM_MODE_PROP_BLOB)) {
prop.values_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(uint32_t)));
prop.enum_blob_ptr = VOID2U64(drmMalloc(prop.count_enum_blobs * sizeof(uint32_t)));
}
if (ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop)) {
r = NULL;
goto err_allocs;
}
if (!(r = drmMalloc(sizeof(*r))))
return NULL;
r->prop_id = prop.prop_id;
r->count_values = prop.count_values;
r->flags = prop.flags;
if (prop.count_values)
r->values = drmAllocCpy(U642VOID(prop.values_ptr), prop.count_values, sizeof(uint64_t));
if (prop.flags & DRM_MODE_PROP_ENUM) {
r->count_enums = prop.count_enum_blobs;
r->enums = drmAllocCpy(U642VOID(prop.enum_blob_ptr), prop.count_enum_blobs, sizeof(struct drm_mode_property_enum));
} else if (prop.flags & DRM_MODE_PROP_BLOB) {
r->values = drmAllocCpy(U642VOID(prop.values_ptr), prop.count_enum_blobs, sizeof(uint32_t));
r->blob_ids = drmAllocCpy(U642VOID(prop.enum_blob_ptr), prop.count_enum_blobs, sizeof(uint32_t));
r->count_blobs = prop.count_enum_blobs;
}
strncpy(r->name, prop.name, DRM_PROP_NAME_LEN);
r->name[DRM_PROP_NAME_LEN-1] = 0;
err_allocs:
drmFree(U642VOID(prop.values_ptr));
drmFree(U642VOID(prop.enum_blob_ptr));
return r;
}
void drmModeFreeProperty(drmModePropertyPtr ptr)
{
if (!ptr)
return;
drmFree(ptr->values);
drmFree(ptr->enums);
drmFree(ptr);
}
drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id)
{
struct drm_mode_get_blob blob;
drmModePropertyBlobPtr r;
blob.length = 0;
blob.data = 0;
blob.blob_id = blob_id;
if (ioctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
return NULL;
if (blob.length)
blob.data = VOID2U64(drmMalloc(blob.length));
if (ioctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob)) {
r = NULL;
goto err_allocs;
}
if (!(r = drmMalloc(sizeof(*r))))
return NULL;
r->id = blob.blob_id;
r->length = blob.length;
r->data = drmAllocCpy(U642VOID(blob.data), 1, blob.length);
err_allocs:
drmFree(U642VOID(blob.data));
return r;
}
void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr)
{
if (!ptr)
return;
drmFree(ptr->data);
drmFree(ptr);
}
int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
uint64_t value)
{
struct drm_mode_connector_set_property osp;
int ret;
osp.connector_id = connector_id;
osp.prop_id = property_id;
osp.value = value;
if ((ret = ioctl(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp)))
return ret;
return 0;
}
/*
* Checks whether a modesetting-capable driver has attached to the PCI id.
* Returns 0 if modesetting is supported,
* -EINVAL for an invalid bus id,
* -ENOSYS if there is no modesetting support.
*/
int drmCheckModesettingSupported(const char *busid)
{
#ifdef __linux__
char pci_dev_dir[1024];
int domain, bus, dev, func;
DIR *sysdir;
struct dirent *dent;
int found = 0, ret;
ret = sscanf(busid, "pci:%04x:%02x:%02x.%d", &domain, &bus, &dev, &func);
if (ret != 4)
return -EINVAL;
sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/drm",
domain, bus, dev, func);
sysdir = opendir(pci_dev_dir);
if (sysdir) {
dent = readdir(sysdir);
while (dent) {
if (!strncmp(dent->d_name, "controlD", 8)) {
found = 1;
break;
}
dent = readdir(sysdir);
}
closedir(sysdir);
if (found)
return 0;
}
sprintf(pci_dev_dir, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/",
domain, bus, dev, func);
sysdir = opendir(pci_dev_dir);
if (!sysdir)
return -EINVAL;
dent = readdir(sysdir);
while (dent) {
if (!strncmp(dent->d_name, "drm:controlD", 12)) {
found = 1;
break;
}
dent = readdir(sysdir);
}
closedir(sysdir);
if (found)
return 0;
#endif
return -ENOSYS;
}
int drmModeReplaceFB(int fd, uint32_t buffer_id,
uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle)
{
struct drm_mode_fb_cmd f;
int ret;
f.width = width;
f.height = height;
f.pitch = pitch;
f.bpp = bpp;
f.depth = depth;
f.handle = bo_handle;
f.fb_id = buffer_id;
if ((ret = ioctl(fd, DRM_IOCTL_MODE_REPLACEFB, &f)))
return ret;
return 0;
}
int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue)
{
int ret;
struct drm_mode_crtc_lut l;
l.crtc_id = crtc_id;
l.gamma_size = size;
l.red = VOID2U64(red);
l.green = VOID2U64(green);
l.blue = VOID2U64(blue);
if ((ret = ioctl(fd, DRM_IOCTL_MODE_GETGAMMA, &l)))
return ret;
return 0;
}
int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue)
{
int ret;
struct drm_mode_crtc_lut l;
l.crtc_id = crtc_id;
l.gamma_size = size;
l.red = VOID2U64(red);
l.green = VOID2U64(green);
l.blue = VOID2U64(blue);
if ((ret = ioctl(fd, DRM_IOCTL_MODE_SETGAMMA, &l)))
return ret;
return 0;
}
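
xf86drmMode.c wraps the new kernel modesetting ioctls, and the usual probe order is resources, then connectors, then (if needed) encoders and modes. The sketch below is not part of the commit and only lists connector status; a real caller would additionally pick a mode, create a framebuffer with drmModeAddFB() and program it with drmModeSetCrtc(). It assumes an fd opened with drmOpen() or drmOpenControl().

/* Sketch: enumerate connectors and print their connection state. */
#include <stdio.h>
#include <stdint.h>
#include "xf86drm.h"
#include "xf86drmMode.h"

static void list_connectors(int fd)
{
    drmModeResPtr res = drmModeGetResources(fd);
    drmModeConnectorPtr conn;
    int i;

    if (res == NULL)
        return;

    for (i = 0; i < res->count_connectors; i++) {
        conn = drmModeGetConnector(fd, res->connectors[i]);
        if (conn == NULL)
            continue;
        printf("connector %u: %s, %d modes\n",
               conn->connector_id,
               conn->connection == DRM_MODE_CONNECTED ? "connected"
                                                      : "disconnected",
               conn->count_modes);
        drmModeFreeConnector(conn);
    }
    drmModeFreeResources(res);
}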

libdrm/xf86drmMode.h (new file, 256 lines)

@ -0,0 +1,256 @@
/*
* \file xf86drmMode.h
* Header for DRM modesetting interface.
*
* \author Jakob Bornecrantz <wallbraker@gmail.com>
*
* \par Acknowledgements:
* Feb 2007, Dave Airlie <airlied@linux.ie>
*/
/*
* Copyright (c) <year> <copyright holders>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <drm.h>
/*
 * This is the modesetting interface for drm.
 *
 * In order to use this interface you must include either <stdint.h> or another
 * header defining uint32_t, int32_t and uint16_t.
 *
 * It aims to provide a randr1.2 compatible interface for modesetting in the
 * kernel; the interface is also meant to be used by libraries like EGL.
 *
 * More information can be found in randrproto.txt, available at:
 * http://gitweb.freedesktop.org/?p=xorg/proto/randrproto.git
 *
 * There are some major differences to be noted. Unlike the randr1.2 protocol,
 * you need to create the memory object of the framebuffer yourself with the
 * ttm buffer object interface, and that object needs to be pinned.
 * (A brief usage sketch follows the declarations at the end of this header.)
 */
typedef struct _drmModeRes {
int count_fbs;
uint32_t *fbs;
int count_crtcs;
uint32_t *crtcs;
int count_connectors;
uint32_t *connectors;
int count_encoders;
uint32_t *encoders;
uint32_t min_width, max_width;
uint32_t min_height, max_height;
} drmModeRes, *drmModeResPtr;
typedef struct drm_mode_fb_cmd drmModeFB, *drmModeFBPtr;
typedef struct _drmModePropertyBlob {
uint32_t id;
uint32_t length;
void *data;
} drmModePropertyBlobRes, *drmModePropertyBlobPtr;
typedef struct _drmModeProperty {
uint32_t prop_id;
uint32_t flags;
char name[DRM_PROP_NAME_LEN];
int count_values;
uint64_t *values; // store the blob lengths
int count_enums;
struct drm_mode_property_enum *enums;
int count_blobs;
uint32_t *blob_ids; // store the blob IDs
} drmModePropertyRes, *drmModePropertyPtr;
typedef struct _drmModeCrtc {
uint32_t crtc_id;
uint32_t buffer_id; /**< FB id to connect to; 0 = disconnect */
uint32_t x, y; /**< Position on the framebuffer */
uint32_t width, height;
int mode_valid;
struct drm_mode_modeinfo mode;
int gamma_size; /**< Number of gamma stops */
} drmModeCrtc, *drmModeCrtcPtr;
typedef struct _drmModeEncoder {
uint32_t encoder_id;
uint32_t encoder_type;
uint32_t crtc_id;
uint32_t possible_crtcs;
uint32_t possible_clones;
} drmModeEncoder, *drmModeEncoderPtr;
typedef enum {
DRM_MODE_CONNECTED = 1,
DRM_MODE_DISCONNECTED = 2,
DRM_MODE_UNKNOWNCONNECTION = 3
} drmModeConnection;
typedef enum {
DRM_MODE_SUBPIXEL_UNKNOWN = 1,
DRM_MODE_SUBPIXEL_HORIZONTAL_RGB = 2,
DRM_MODE_SUBPIXEL_HORIZONTAL_BGR = 3,
DRM_MODE_SUBPIXEL_VERTICAL_RGB = 4,
DRM_MODE_SUBPIXEL_VERTICAL_BGR = 5,
DRM_MODE_SUBPIXEL_NONE = 6
} drmModeSubPixel;
typedef struct _drmModeConnector {
uint32_t connector_id;
uint32_t encoder_id; /**< Encoder currently connected to */
uint32_t connector_type;
uint32_t connector_type_id;
drmModeConnection connection;
uint32_t mmWidth, mmHeight; /**< WxH in millimeters */
drmModeSubPixel subpixel;
int count_modes;
struct drm_mode_modeinfo *modes;
int count_props;
uint32_t *props; /**< List of property ids */
uint64_t *prop_values; /**< List of property values */
int count_encoders;
uint32_t *encoders; /**< List of encoder ids */
} drmModeConnector, *drmModeConnectorPtr;
extern void drmModeFreeModeInfo( struct drm_mode_modeinfo *ptr );
extern void drmModeFreeResources( drmModeResPtr ptr );
extern void drmModeFreeFB( drmModeFBPtr ptr );
extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
extern void drmModeFreeConnector( drmModeConnectorPtr ptr );
extern void drmModeFreeEncoder( drmModeEncoderPtr ptr );
/**
* Retrieves all of the resources associated with a card.
*/
extern drmModeResPtr drmModeGetResources(int fd);
/*
* FrameBuffer manipulation.
*/
/**
* Retrieve information about framebuffer bufferId
*/
extern drmModeFBPtr drmModeGetFB(int fd, uint32_t bufferId);
/**
* Creates a new framebuffer with a buffer object as its scanout buffer.
*/
extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
uint32_t *buf_id);
/**
* Destroys the given framebuffer.
*/
extern int drmModeRmFB(int fd, uint32_t bufferId);
/**
* Replace a framebuffer object with a new one - for resizing the screen.
*/
extern int drmModeReplaceFB(int fd, uint32_t buffer_id,
uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle);
/*
* Crtc functions
*/
/**
* Retrieve information about the crtc crtcId
*/
extern drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId);
/**
* Set the mode on crtc crtcId using the given mode.
*/
int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
uint32_t x, uint32_t y, uint32_t *connectors, int count,
struct drm_mode_modeinfo *mode);
/*
* Cursor functions
*/
/**
* Set the cursor on crtc
*/
int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width, uint32_t height);
/**
* Move the cursor on crtc
*/
int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y);
/**
* Encoder functions
*/
drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id);
/*
* Connector manipulation
*/
/**
* Retrieve information about the connector connectorId.
*/
extern drmModeConnectorPtr drmModeGetConnector(int fd,
uint32_t connectorId);
/**
* Attaches the given mode to a connector.
*/
extern int drmModeAttachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
/**
* Detaches a mode from the connector;
* the mode must currently be unused by the connector.
*/
extern int drmModeDetachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
extern void drmModeFreeProperty(drmModePropertyPtr ptr);
extern drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id);
extern void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr);
extern int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
uint64_t value);
extern int drmCheckModesettingSupported(const char *busid);
extern int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue);
extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue);
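
Taken together, the declarations above describe the usual probe-and-modeset flow. The snippet below is a minimal, illustrative sketch of that flow rather than part of the header: it assumes an already-open DRM file descriptor and a pinned scanout buffer object handle created beforehand (as the introductory comment requires), uses 24-bit depth at 32 bpp, and trims error handling; the helper name is hypothetical.

#include <stdint.h>
#include "xf86drmMode.h"

/* Hypothetical helper: light up the first connected connector on the first CRTC. */
static int example_modeset(int fd, uint32_t bo_handle, uint32_t pitch,
                           uint32_t width, uint32_t height)
{
	drmModeResPtr res = drmModeGetResources(fd);
	drmModeConnectorPtr conn = NULL;
	uint32_t fb_id;
	int i, ret = -1;

	if (!res)
		return -1;

	/* Pick the first connector that reports a monitor and at least one mode. */
	for (i = 0; i < res->count_connectors; i++) {
		conn = drmModeGetConnector(fd, res->connectors[i]);
		if (conn && conn->connection == DRM_MODE_CONNECTED && conn->count_modes)
			break;
		drmModeFreeConnector(conn);
		conn = NULL;
	}

	if (conn && res->count_crtcs) {
		/* Wrap the pinned buffer object in a framebuffer. */
		if (drmModeAddFB(fd, width, height, 24, 32, pitch, bo_handle, &fb_id) == 0)
			/* Program the first CRTC with the connector's first reported mode. */
			ret = drmModeSetCrtc(fd, res->crtcs[0], fb_id, 0, 0,
			                     &conn->connector_id, 1, &conn->modes[0]);
	}

	if (conn)
		drmModeFreeConnector(conn);
	drmModeFreeResources(res);
	return ret;
}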

View File

@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
mach64.o nv.o nouveau.o xgi.o
mach64.o nv.o nouveau.o xgi.o radeon_ms.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@ -90,8 +90,9 @@ VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS)
MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h nouveau_display.h $(DRMHEADERS)
XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
RADEONMSHEADERS = radeon_ms_driver.h $(DRMHEADERS)
PROGS = dristat drmstat
@ -290,6 +291,7 @@ CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
CONFIG_DRM_XGI := n
CONFIG_DRM_RADEON_MS := n
# Enable module builds for the modules requested/supported.
@ -329,6 +331,9 @@ endif
ifneq (,$(findstring xgi,$(DRM_MODULES)))
CONFIG_DRM_XGI := m
endif
ifneq (,$(findstring radeon_ms,$(DRM_MODULES)))
#CONFIG_DRM_RADEON_MS := m
endif
# These require AGP support

View File

@ -12,17 +12,21 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
drm_regman.o drm_vm_nopage_compat.o drm_gem.o
drm_hashtab.o drm_mm.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
drm_regman.o drm_vm_nopage_compat.o drm_gem.o drm_uncached.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
i915_opregion.o \
i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_compat.o i915_suspend.o i915_opregion.o \
i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \
intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
intel_tv.o intel_dvo.o dvo_ch7xxx.o \
dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
@ -32,8 +36,15 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
nv04_instmem.o nv50_instmem.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
nv04_instmem.o nv50_instmem.o \
nouveau_bios.o \
nv50_crtc.o nv50_cursor.o nv50_lut.o nv50_fb.o nv50_output.o nv50_sor.o nv50_dac.o nv50_connector.o nv50_i2c.o nv50_display.o \
nv50_kms_wrapper.o \
nv50_fbcon.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_gem.o \
radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o radeon_cs.o \
atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o radeon_legacy_crtc.o radeon_legacy_encoders.o \
radeon_cursor.o radeon_pm.o radeon_gem_proc.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
@ -47,6 +58,7 @@ xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
radeon-objs += radeon_ioc32.o
radeon_ms-objs += radeon_ms_compat.o
mga-objs += mga_ioc32.o
r128-objs += r128_ioc32.o
i915-objs += i915_ioc32.o
@ -69,3 +81,4 @@ obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
obj-$(CONFIG_DRM_XGI) += xgi.o
obj-$(CONFIG_DRM_RADEON_MS) += radeon_ms.o

518
linux-core/ObjectID.h Normal file
View File

@ -0,0 +1,518 @@
/*
* Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
#ifndef _OBJECTID_H
#define _OBJECTID_H
#if defined(_X86_)
#pragma pack(1)
#endif
/****************************************************/
/* Graphics Object Type Definition */
/****************************************************/
#define GRAPH_OBJECT_TYPE_NONE 0x0
#define GRAPH_OBJECT_TYPE_GPU 0x1
#define GRAPH_OBJECT_TYPE_ENCODER 0x2
#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
#define GRAPH_OBJECT_TYPE_ROUTER 0x4
/* deleted */
/****************************************************/
/* Encoder Object ID Definition */
/****************************************************/
#define ENCODER_OBJECT_ID_NONE 0x00
/* Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
/* External Third Party Encoders */
#define ENCODER_OBJECT_ID_SI170B 0x08
#define ENCODER_OBJECT_ID_CH7303 0x09
#define ENCODER_OBJECT_ID_CH7301 0x0A
#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
#define ENCODER_OBJECT_ID_TITFP513 0x0E
#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
#define ENCODER_OBJECT_ID_VT1625 0x1A
#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
#define ENCODER_OBJECT_ID_DP_AN9801 0x1C
#define ENCODER_OBJECT_ID_DP_DP501 0x1D
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY 0x1E
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
/****************************************************/
/* Connector Object ID Definition */
/****************************************************/
#define CONNECTOR_OBJECT_ID_NONE 0x00
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 0x04
#define CONNECTOR_OBJECT_ID_VGA 0x05
#define CONNECTOR_OBJECT_ID_COMPOSITE 0x06
#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
#define CONNECTOR_OBJECT_ID_YPbPr 0x08
#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
#define CONNECTOR_OBJECT_ID_SCART 0x0B
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
#define CONNECTOR_OBJECT_ID_LVDS 0x0E
#define CONNECTOR_OBJECT_ID_7PIN_DIN 0x0F
#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR 0x10
#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
/* deleted */
/****************************************************/
/* Router Object ID Definition */
/****************************************************/
#define ROUTER_OBJECT_ID_NONE 0x00
#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
/****************************************************/
/* Graphics Object ENUM ID Definition */
/****************************************************/
#define GRAPH_OBJECT_ENUM_ID1 0x01
#define GRAPH_OBJECT_ENUM_ID2 0x02
#define GRAPH_OBJECT_ENUM_ID3 0x03
#define GRAPH_OBJECT_ENUM_ID4 0x04
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
/****************************************************/
/* Graphics Object ID Bit definition */
/****************************************************/
#define OBJECT_ID_MASK 0x00FF
#define ENUM_ID_MASK 0x0700
#define RESERVED1_ID_MASK 0x0800
#define OBJECT_TYPE_MASK 0x7000
#define RESERVED2_ID_MASK 0x8000
#define OBJECT_ID_SHIFT 0x00
#define ENUM_ID_SHIFT 0x08
#define OBJECT_TYPE_SHIFT 0x0C
/****************************************************/
/* Graphics Object family definition */
/****************************************************/
#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
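/*
 * Worked example (illustrative, not part of the original header): with the
 * masks and shifts above, a full object id packs as type << 12 | enum << 8 | id.
 * For instance:
 *   ENCODER_INTERNAL_LVDS_ENUM_ID1
 *     = (GRAPH_OBJECT_TYPE_ENCODER       << OBJECT_TYPE_SHIFT)   -> 0x2000
 *     | (GRAPH_OBJECT_ENUM_ID1           << ENUM_ID_SHIFT)       -> 0x0100
 *     | (ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)     -> 0x0001
 *     = 0x2101,
 * matching the legacy literal values kept in the commented-out block below.
 */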
/****************************************************/
/* GPU Object ID definition - Shared with BIOS */
/****************************************************/
#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
/****************************************************/
/* Encoder Object ID definition - Shared with BIOS */
/****************************************************/
/*
#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
#define ENCODER_SIL170B_ENUM_ID1 0x2108
#define ENCODER_CH7303_ENUM_ID1 0x2109
#define ENCODER_CH7301_ENUM_ID1 0x210A
#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 0x210C
#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 0x210D
#define ENCODER_TITFP513_ENUM_ID1 0x210E
#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 0x210F
#define ENCODER_VT1623_ENUM_ID1 0x2110
#define ENCODER_HDMI_SI1930_ENUM_ID1 0x2111
#define ENCODER_HDMI_INTERNAL_ENUM_ID1 0x2112
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
#define ENCODER_SI178_ENUM_ID1 0x2117
#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
#define ENCODER_VT1625_ENUM_ID1 0x211A
#define ENCODER_HDMI_SI1932_ENUM_ID1 0x211B
#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1 0x211C
#define ENCODER_DP_DP501_ENUM_ID1 0x211D
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
*/
#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
/*
#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 0x3101
#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 0x3102
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 0x3103
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 0x3104
#define CONNECTOR_VGA_ENUM_ID1 0x3105
#define CONNECTOR_COMPOSITE_ENUM_ID1 0x3106
#define CONNECTOR_SVIDEO_ENUM_ID1 0x3107
#define CONNECTOR_YPbPr_ENUM_ID1 0x3108
#define CONNECTOR_D_CONNECTORE_ENUM_ID1 0x3109
#define CONNECTOR_9PIN_DIN_ENUM_ID1 0x310A
#define CONNECTOR_SCART_ENUM_ID1 0x310B
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 0x310C
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 0x310D
#define CONNECTOR_LVDS_ENUM_ID1 0x310E
#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
*/
#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
/* deleted */
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
#if defined(_X86_)
#pragma pack()
#endif
#endif /* _OBJECTID_H */

1
linux-core/amd.h Symbolic link
View File

@ -0,0 +1 @@
../shared-core/amd.h

1
linux-core/amd_legacy.h Symbolic link
View File

@ -0,0 +1 @@
../shared-core/amd_legacy.h

View File

@ -0,0 +1 @@
../shared-core/amd_legacy_cbuffer.c

View File

@ -0,0 +1 @@
../shared-core/amd_legacy_fence.h

View File

@ -39,7 +39,7 @@
#define ATI_PCIE_WRITE 0x4
#define ATI_PCIE_READ 0x8
static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart)
static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, volatile u32 *pci_gart)
{
u32 page_base;
@ -61,8 +61,28 @@ static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *
*pci_gart = cpu_to_le32(page_base);
}
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
static __inline__ dma_addr_t gart_get_page_from_table(struct drm_ati_pcigart_info *gart_info, volatile u32 *pci_gart)
{
dma_addr_t retval;
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
retval = (*pci_gart & ATI_PCIGART_PAGE_MASK);
retval += (((*pci_gart & 0xf0) >> 4) << 16) << 16;
break;
case DRM_ATI_GART_PCIE:
retval = (*pci_gart & ~0xc);
retval <<= 8;
break;
case DRM_ATI_GART_PCI:
retval = *pci_gart;
break;
}
return retval;
}
int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
PAGE_SIZE,
@ -70,12 +90,25 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
if (gart_info->table_handle == NULL)
return -ENOMEM;
#ifdef CONFIG_X86
/* IGPs only exist on x86 in any case */
if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
set_memory_uc((unsigned long)gart_info->table_handle->vaddr, gart_info->table_size >> PAGE_SHIFT);
#endif
memset(gart_info->table_handle->vaddr, 0, gart_info->table_size);
return 0;
}
EXPORT_SYMBOL(drm_ati_alloc_pcigart_table);
static void drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
#ifdef CONFIG_X86
/* IGPs only exist on x86 in any case */
if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
set_memory_wb((unsigned long)gart_info->table_handle->vaddr, gart_info->table_size >> PAGE_SHIFT);
#endif
drm_pci_free(dev, gart_info->table_handle);
gart_info->table_handle = NULL;
}
@ -89,7 +122,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
/* we need to support large memory configurations */
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
return 0;
}
@ -132,12 +164,8 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
int max_pages;
dma_addr_t entry_addr;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
goto done;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN && gart_info->table_handle == NULL) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
@ -145,14 +173,19 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
address = gart_info->table_handle->vaddr;
bus_address = gart_info->table_handle->busaddr;
} else {
address = gart_info->addr;
bus_address = gart_info->bus_addr;
DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
bus_address, (unsigned long)address);
}
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
goto done;
}
pci_gart = (u32 *) address;
@ -161,8 +194,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
pages = (entry->pages <= max_pages)
? entry->pages : max_pages;
memset(pci_gart, 0, max_pages * sizeof(u32));
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
@ -185,11 +216,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
ret = 1;
#if defined(__i386__) || defined(__x86_64__)
wbinvd();
#else
mb();
#endif
done:
gart_info->addr = address;
@ -197,3 +224,141 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);
static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
{
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
static int ati_pcigart_populate(struct drm_ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
struct page *dummy_read_page)
{
struct ati_pcigart_ttm_backend *atipci_be =
container_of(backend, struct ati_pcigart_ttm_backend, backend);
atipci_be->pages = pages;
atipci_be->num_pages = num_pages;
atipci_be->populated = 1;
return 0;
}
static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
struct drm_bo_mem_reg *bo_mem)
{
struct ati_pcigart_ttm_backend *atipci_be =
container_of(backend, struct ati_pcigart_ttm_backend, backend);
off_t j;
int i;
struct drm_ati_pcigart_info *info = atipci_be->gart_info;
u32 *pci_gart;
dma_addr_t offset = bo_mem->mm_node->start;
dma_addr_t page_base;
pci_gart = info->addr;
j = offset;
while (j < (offset + atipci_be->num_pages)) {
if (gart_get_page_from_table(info, pci_gart + j))
return -EBUSY;
j++;
}
for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
struct page *cur_page = atipci_be->pages[i];
/* write value */
page_base = page_to_phys(cur_page);
gart_insert_page_into_table(info, page_base, pci_gart + j);
}
mb();
atipci_be->gart_flush_fn(atipci_be->dev);
atipci_be->bound = 1;
atipci_be->offset = offset;
/* need to traverse table and add entries */
DRM_DEBUG("\n");
return 0;
}
static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
{
struct ati_pcigart_ttm_backend *atipci_be =
container_of(backend, struct ati_pcigart_ttm_backend, backend);
struct drm_ati_pcigart_info *info = atipci_be->gart_info;
unsigned long offset = atipci_be->offset;
int i;
off_t j;
u32 *pci_gart = info->addr;
if (atipci_be->bound != 1)
return -EINVAL;
for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
*(pci_gart + j) = 0;
}
atipci_be->gart_flush_fn(atipci_be->dev);
atipci_be->bound = 0;
atipci_be->offset = 0;
return 0;
}
static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
{
struct ati_pcigart_ttm_backend *atipci_be =
container_of(backend, struct ati_pcigart_ttm_backend, backend);
DRM_DEBUG("\n");
if (atipci_be->pages) {
backend->func->unbind(backend);
atipci_be->pages = NULL;
}
atipci_be->num_pages = 0;
}
static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
{
struct ati_pcigart_ttm_backend *atipci_be;
if (backend) {
DRM_DEBUG("\n");
atipci_be = container_of(backend, struct ati_pcigart_ttm_backend, backend);
if (atipci_be) {
if (atipci_be->pages) {
backend->func->clear(backend);
}
drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
}
}
}
static struct drm_ttm_backend_func ati_pcigart_ttm_backend =
{
.needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
.populate = ati_pcigart_populate,
.clear = ati_pcigart_clear_ttm,
.bind = ati_pcigart_bind_ttm,
.unbind = ati_pcigart_unbind_ttm,
.destroy = ati_pcigart_destroy_ttm,
};
struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
{
struct ati_pcigart_ttm_backend *atipci_be;
atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
if (!atipci_be)
return NULL;
atipci_be->populated = 0;
atipci_be->backend.func = &ati_pcigart_ttm_backend;
// atipci_be->backend.mem_type = DRM_BO_MEM_TT;
atipci_be->gart_info = info;
atipci_be->gart_flush_fn = gart_flush_fn;
atipci_be->dev = dev;
return &atipci_be->backend;
}
EXPORT_SYMBOL(ati_pcigart_init_ttm);

48
linux-core/atom-bits.h Normal file
View File

@ -0,0 +1,48 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Stanislaw Skowronek
*/
#ifndef ATOM_BITS_H
#define ATOM_BITS_H
static inline uint8_t get_u8(void *bios, int ptr)
{
return ((unsigned char *)bios)[ptr];
}
#define U8(ptr) get_u8(ctx->ctx->bios,(ptr))
#define CU8(ptr) get_u8(ctx->bios,(ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
return get_u8(bios,ptr)|(((uint16_t)get_u8(bios,ptr+1))<<8);
}
#define U16(ptr) get_u16(ctx->ctx->bios,(ptr))
#define CU16(ptr) get_u16(ctx->bios,(ptr))
static inline uint32_t get_u32(void *bios, int ptr)
{
return get_u16(bios,ptr)|(((uint32_t)get_u16(bios,ptr+2))<<16);
}
#define U32(ptr) get_u32(ctx->ctx->bios,(ptr))
#define CU32(ptr) get_u32(ctx->bios,(ptr))
#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
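/*
 * Worked example (illustrative, not part of the original header): the accessors
 * above compose little-endian values byte by byte.  With BIOS bytes
 * { 0x78, 0x56, 0x34, 0x12 } starting at ptr, get_u16(bios, ptr) yields 0x5678
 * and get_u32(bios, ptr) yields 0x12345678.
 */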
#endif

100
linux-core/atom-names.h Normal file
View File

@ -0,0 +1,100 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Stanislaw Skowronek
*/
#ifndef ATOM_NAMES_H
#define ATOM_NAMES_H
#include "atom.h"
#ifdef ATOM_DEBUG
#define ATOM_OP_NAMES_CNT 123
static char *atom_op_names[ATOM_OP_NAMES_CNT]={
"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
"DEBUG", "CTB_DS",
};
#define ATOM_TABLE_NAMES_CNT 74
static char *atom_table_names[ATOM_TABLE_NAMES_CNT]={
"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
"MemoryDeviceInit", "EnableYUV",
};
#define ATOM_IO_NAMES_CNT 5
static char *atom_io_names[ATOM_IO_NAMES_CNT]={
"MM", "PLL", "MC", "PCIE", "PCIE PORT",
};
#else
#define ATOM_OP_NAMES_CNT 0
#define ATOM_TABLE_NAMES_CNT 0
#define ATOM_IO_NAMES_CNT 0
#endif
#endif

42
linux-core/atom-types.h Normal file
View File

@ -0,0 +1,42 @@
/*
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Dave Airlie
*/
#ifndef ATOM_TYPES_H
#define ATOM_TYPES_H
/* sync atom types to kernel types */
typedef uint16_t USHORT;
typedef uint32_t ULONG;
typedef uint8_t UCHAR;
#ifndef ATOM_BIG_ENDIAN
#if defined(__BIG_ENDIAN)
#define ATOM_BIG_ENDIAN 1
#else
#define ATOM_BIG_ENDIAN 0
#endif
#endif
#endif

1143
linux-core/atom.c Normal file

File diff suppressed because it is too large Load Diff

148
linux-core/atom.h Normal file
View File

@ -0,0 +1,148 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Stanislaw Skowronek
*/
#ifndef ATOM_H
#define ATOM_H
#include <linux/types.h>
#include "drmP.h"
#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
#define ATOM_ATI_MAGIC " 761295520"
#define ATOM_ROM_TABLE_PTR 0x48
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
#define ATOM_CMD_INIT 0
#define ATOM_CMD_SETSCLK 0x0A
#define ATOM_CMD_SETMCLK 0x0B
#define ATOM_CMD_SETPCLK 0x0C
#define ATOM_DATA_FWI_PTR 0xC
#define ATOM_DATA_IIO_PTR 0x32
#define ATOM_FWI_DEFSCLK_PTR 8
#define ATOM_FWI_DEFMCLK_PTR 0xC
#define ATOM_FWI_MAXSCLK_PTR 0x24
#define ATOM_FWI_MAXMCLK_PTR 0x28
#define ATOM_CT_SIZE_PTR 0
#define ATOM_CT_WS_PTR 4
#define ATOM_CT_PS_PTR 5
#define ATOM_CT_PS_MASK 0x7F
#define ATOM_CT_CODE_PTR 6
#define ATOM_OP_CNT 123
#define ATOM_OP_EOT 91
#define ATOM_CASE_MAGIC 0x63
#define ATOM_CASE_END 0x5A5A
#define ATOM_ARG_REG 0
#define ATOM_ARG_PS 1
#define ATOM_ARG_WS 2
#define ATOM_ARG_ID 4
#define ATOM_ARG_FB 3
#define ATOM_ARG_IMM 5
#define ATOM_ARG_PLL 6
#define ATOM_ARG_MC 7
#define ATOM_SRC_DWORD 0
#define ATOM_SRC_WORD0 1
#define ATOM_SRC_WORD8 2
#define ATOM_SRC_WORD16 3
#define ATOM_SRC_BYTE0 4
#define ATOM_SRC_BYTE8 5
#define ATOM_SRC_BYTE16 6
#define ATOM_SRC_BYTE24 7
#define ATOM_WS_QUOTIENT 0x40
#define ATOM_WS_REMAINDER 0x41
#define ATOM_WS_DATAPTR 0x42
#define ATOM_WS_SHIFT 0x43
#define ATOM_WS_OR_MASK 0x44
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
#define ATOM_IIO_READ 2
#define ATOM_IIO_WRITE 3
#define ATOM_IIO_CLEAR 4
#define ATOM_IIO_SET 5
#define ATOM_IIO_MOVE_INDEX 6
#define ATOM_IIO_MOVE_ATTR 7
#define ATOM_IIO_MOVE_DATA 8
#define ATOM_IIO_END 9
#define ATOM_IO_MM 0
#define ATOM_IO_PCI 1
#define ATOM_IO_SYSIO 2
#define ATOM_IO_IIO 0x80
struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); // filled by driver
uint32_t (* reg_read)(struct card_info *, uint32_t); // filled by driver
void (* mc_write)(struct card_info *, uint32_t, uint32_t); // filled by driver
uint32_t (* mc_read)(struct card_info *, uint32_t); // filled by driver
// int (* read_rom)(struct card_info *, uint8_t *); // filled by driver
};
struct atom_context {
struct card_info *card;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
uint16_t data_block;
uint32_t fb_base;
uint32_t divmul[2];
uint16_t io_attr;
uint16_t reg_block;
uint8_t shift;
int cs_equal, cs_above;
int io_mode;
};
extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
void atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
#include "atom-types.h"
#include "atombios.h"
#include "ObjectID.h"
#endif
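
The split between card_info and atom_context above keeps the BIOS interpreter independent of how registers are reached: the driver fills in the register and memory-controller accessors, and the parser only touches hardware through those callbacks. Below is a minimal, hedged sketch of how a driver might wire this up; the accessor names, the helper, and the vbios pointer are hypothetical stand-ins for driver-specific code (in this tree the radeon driver performs the equivalent setup and then drives tables with atom_execute_table(), as atombios_crtc.c below shows).

#include "atom.h"

/* Hypothetical driver-provided accessors; a real driver maps these onto its
 * MMIO and memory-controller register helpers. */
uint32_t my_reg_read(struct card_info *info, uint32_t reg);
void my_reg_write(struct card_info *info, uint32_t reg, uint32_t val);
uint32_t my_mc_read(struct card_info *info, uint32_t reg);
void my_mc_write(struct card_info *info, uint32_t reg, uint32_t val);

static struct card_info example_card;

static struct atom_context *example_atom_setup(struct drm_device *dev, void *vbios)
{
	struct atom_context *ctx;

	example_card.dev = dev;
	example_card.reg_read = my_reg_read;
	example_card.reg_write = my_reg_write;
	example_card.mc_read = my_mc_read;
	example_card.mc_write = my_mc_write;

	ctx = atom_parse(&example_card, vbios);	/* locate command and data tables */
	if (!ctx)
		return NULL;

	atom_asic_init(ctx);	/* run the BIOS ASIC initialization table */

	/* later: atom_execute_table(ctx, index, (uint32_t *)&args);
	 *        atom_destroy(ctx) on teardown */
	return ctx;
}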

5025
linux-core/atombios.h Normal file

File diff suppressed because it is too large Load Diff

462
linux-core/atombios_crtc.c Normal file
View File

@ -0,0 +1,462 @@
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "drm_crtc_helper.h"
#include "atom.h"
#include "atom-bits.h"
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
ENABLE_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = lock;
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
ENABLE_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
ENABLE_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucEnable = state;
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&args);
}
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
switch(mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
if (radeon_is_dce3(dev_priv))
atombios_enable_crtc_memreq(crtc, 1);
atombios_enable_crtc(crtc, 1);
atombios_blank_crtc(crtc, 0);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_OFF:
atombios_blank_crtc(crtc, 1);
atombios_enable_crtc(crtc, 0);
if (radeon_is_dce3(dev_priv))
atombios_enable_crtc_memreq(crtc, 0);
break;
}
}
static void
atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, SET_CRTC_USING_DTD_TIMING_PARAMETERS *crtc_param)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
conv_param.usH_Blanking_Time = cpu_to_le16(crtc_param->usH_Blanking_Time);
conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
conv_param.usV_Blanking_Time = cpu_to_le16(crtc_param->usV_Blanking_Time);
conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
conv_param.susModeMiscInfo.usAccess = cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
conv_param.ucCRTC = crtc_param->ucCRTC;
printk("executing set crtc dtd timing\n");
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&conv_param);
}
void atombios_crtc_set_timing(struct drm_crtc *crtc, SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_param)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
conv_param.susModeMiscInfo.usAccess = cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
conv_param.ucCRTC = crtc_param->ucCRTC;
conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
conv_param.ucReserved = crtc_param->ucReserved;
printk("executing set crtc timing\n");
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&conv_param);
}
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
uint8_t frev, crev;
int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
SET_PIXEL_CLOCK_PS_ALLOCATION spc_param;
PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
uint32_t sclock = mode->clock;
uint32_t ref_div = 0, fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int pll_flags = 0;
memset(&spc_param, 0, sizeof(SET_PIXEL_CLOCK_PS_ALLOCATION));
if (!radeon_is_avivo(dev_priv))
pll_flags |= RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
if (radeon_crtc->crtc_id == 0)
pll = &dev_priv->mode_info.p1pll;
else
pll = &dev_priv->mode_info.p2pll;
radeon_compute_pll(pll, mode->clock, &sclock,
&fb_div, &ref_div, &post_div, pll_flags);
if (radeon_is_avivo(dev_priv)) {
uint32_t ss_cntl;
if (radeon_crtc->crtc_id == 0) {
ss_cntl = RADEON_READ(AVIVO_P1PLL_INT_SS_CNTL);
RADEON_WRITE(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
} else {
ss_cntl = RADEON_READ(AVIVO_P2PLL_INT_SS_CNTL);
RADEON_WRITE(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
}
}
/* */
atom_parse_cmd_header(dev_priv->mode_info.atom_context, index, &frev, &crev);
switch(frev) {
case 1:
switch(crev) {
case 1:
case 2:
spc2_ptr = (PIXEL_CLOCK_PARAMETERS_V2*)&spc_param.sPCLKInput;
spc2_ptr->usPixelClock = cpu_to_le16(sclock);
spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
spc2_ptr->ucPostDiv = post_div;
spc2_ptr->ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
spc2_ptr->ucRefDivSrc = 1;
break;
case 3:
spc3_ptr = (PIXEL_CLOCK_PARAMETERS_V3*)&spc_param.sPCLKInput;
spc3_ptr->usPixelClock = cpu_to_le16(sclock);
spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
spc3_ptr->ucPostDiv = post_div;
spc3_ptr->ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
/* TODO insert output encoder object stuff here for r600 */
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
printk("executing set pll\n");
atom_execute_table(dev_priv->mode_info.atom_context, index, (uint32_t *)&spc_param);
}
void atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
struct drm_radeon_gem_object *obj_priv;
uint32_t fb_location, fb_format, fb_pitch_pixels;
if (!crtc->fb)
return;
radeon_fb = to_radeon_framebuffer(crtc->fb);
obj = radeon_fb->obj;
obj_priv = obj->driver_private;
fb_location = obj_priv->bo->offset + dev_priv->fb_location;
switch(crtc->fb->bits_per_pixel) {
case 15:
fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
break;
case 16:
fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
break;
case 24:
case 32:
fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
break;
default:
DRM_ERROR("Unsupported screen depth %d\n", crtc->fb->bits_per_pixel);
return;
}
/* TODO tiling */
if (radeon_crtc->crtc_id == 0)
RADEON_WRITE(AVIVO_D1VGA_CONTROL, 0);
else
RADEON_WRITE(AVIVO_D2VGA_CONTROL, 0);
RADEON_WRITE(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location);
RADEON_WRITE(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, fb_location);
RADEON_WRITE(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
RADEON_WRITE(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
RADEON_WRITE(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, x);
RADEON_WRITE(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, y);
RADEON_WRITE(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, x + crtc->mode.hdisplay);
RADEON_WRITE(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, y + crtc->mode.vdisplay);
fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
RADEON_WRITE(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
RADEON_WRITE(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
RADEON_WRITE(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
crtc->mode.vdisplay);
RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, (x << 16) | y);
RADEON_WRITE(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
RADEON_WRITE(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
RADEON_WRITE(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
0);
}
void atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_radeon_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder;
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
/* TODO color tiling */
memset(&crtc_timing, 0, sizeof(crtc_timing));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
}
crtc_timing.ucCRTC = radeon_crtc->crtc_id;
crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
crtc_timing.usH_SyncWidth = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
crtc_timing.usV_SyncWidth = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_crtc_set_timing(crtc, &crtc_timing);
if (radeon_is_avivo(dev_priv))
atombios_crtc_set_base(crtc, x, y);
else {
if (radeon_crtc->crtc_id == 0) {
SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
/* setup FP shadow regs on R4xx */
crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
crtc_dtd_timing.usH_Blanking_Time = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hdisplay;
crtc_dtd_timing.usV_Blanking_Time = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vdisplay;
crtc_dtd_timing.usH_SyncOffset = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay;
crtc_dtd_timing.usV_SyncOffset = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay;
crtc_dtd_timing.usH_SyncWidth = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
crtc_dtd_timing.usV_SyncWidth = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
//crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder;
//crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
crtc_dtd_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
}
radeon_crtc_set_base(crtc, x, y);
radeon_legacy_atom_set_surface(crtc);
}
}
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
atombios_lock_crtc(crtc, 1);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, 0);
}
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.dpms = atombios_crtc_dpms,
.mode_fixup = atombios_crtc_mode_fixup,
.mode_set = atombios_crtc_mode_set,
.mode_set_base = atombios_crtc_set_base,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc)
{
if (radeon_crtc->crtc_id == 1)
radeon_crtc->crtc_offset = AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
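A minimal sketch, not part of this patch, of how the radeon mode init code is expected to wire one CRTC up to these helpers. Apart from drm_crtc_init(), the radeon_crtc base/crtc_id fields and radeon_atombios_init_crtc(), the example_* names are illustrative assumptions only:

extern const struct drm_crtc_funcs example_radeon_crtc_funcs;	/* hypothetical funcs table */

static void example_radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_crtc *radeon_crtc;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc), GFP_KERNEL);
	if (!radeon_crtc)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &example_radeon_crtc_funcs);
	radeon_crtc->crtc_id = index;

	/* hooks atombios_helper_funcs (above) onto this CRTC */
	radeon_atombios_init_crtc(dev, radeon_crtc);
}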


@ -54,6 +54,7 @@
#include <linux/smp_lock.h> /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kref.h>
#include <linux/pagemap.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
@ -112,7 +113,8 @@ typedef unsigned long uintptr_t;
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
#define DRIVER_GEM 0x400
#define DRIVER_MODESET 0x400
#define DRIVER_GEM 0x800
/*@}*/
@ -164,7 +166,6 @@ typedef unsigned long uintptr_t;
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define DRM_MAP_HASH_OFFSET 0x10000000
#define DRM_MAP_HASH_ORDER 12
#define DRM_OBJECT_HASH_ORDER 12
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
/*
@ -266,11 +267,11 @@ typedef unsigned long uintptr_t;
*/
#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
do { \
if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
dev->lock.file_priv != file_priv ) { \
if ( !_DRM_LOCK_IS_HELD( file_priv->master->lock.hw_lock->lock ) || \
file_priv->master->lock.file_priv != file_priv ) { \
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
__FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
dev->lock.file_priv, file_priv ); \
__FUNCTION__, _DRM_LOCK_IS_HELD( file_priv->master->lock.hw_lock->lock ),\
file_priv->master->lock.file_priv, file_priv ); \
return -EINVAL; \
} \
} while (0)
@ -304,6 +305,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
#define DRM_AUTH 0x1
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
#define DRM_CONTROL_ALLOW 0x8 // allow ioctl to operate on control node
struct drm_ioctl_desc {
unsigned int cmd;
@ -402,44 +404,29 @@ struct drm_buf_entry {
struct drm_freelist freelist;
};
enum drm_ref_type {
_DRM_REF_USE = 0,
_DRM_REF_TYPE1,
_DRM_NO_REF_TYPES
};
/** File private data */
struct drm_file {
int authenticated;
int master;
pid_t pid;
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct list_head lhead;
struct drm_minor *minor;
int remove_auth_on_close;
unsigned long lock_count;
/*
* The user object hash table is global and resides in the
* drm_device structure. We protect the lists and hash tables with the
* device struct_mutex. A bit coarse-grained but probably the best
* option.
*/
struct list_head refd_objects;
/** Mapping of mm object handles to object pointers. */
struct idr object_idr;
/** Lock for synchronization of access to object_idr. */
spinlock_t table_lock;
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;
int is_master; /* this file private is a master for a minor */
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
struct list_head fbs;
};
/** Wait queue */
@ -573,6 +560,8 @@ struct drm_map_list {
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
uint64_t user_token;
struct drm_master *master; /** if this map is associated with a specific
master */
struct drm_mm_node *file_offset_node;
};
@ -594,6 +583,13 @@ struct drm_vbl_sig {
struct task_struct *task;
};
struct drm_hotplug_sig {
struct list_head head;
unsigned int counter;
struct siginfo info;
struct task_struct *task;
};
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
@ -665,6 +661,31 @@ struct drm_gem_object {
};
#include "drm_objects.h"
#include "drm_crtc.h"
/* per-master structure */
struct drm_master {
struct kref refcount; /* refcount for this master */
struct list_head head; /**< each minor contains a list of masters */
struct drm_minor *minor; /**< link back to minor we are a master for */
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
int blocked; /**< Blocked due to VC switch? */
/** \name Authentication */
/*@{ */
struct drm_open_hash magiclist;
struct list_head magicfree;
/*@} */
struct drm_lock_data lock; /**< Information on hardware lock */
void *driver_priv; /**< Private structure for driver to use */
};
/**
* DRM driver structure. This structure represent the common code for
@ -765,6 +786,10 @@ struct drm_driver {
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
/* Master routines */
int (*master_create)(struct drm_device *dev, struct drm_master *master);
void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
@ -798,6 +823,8 @@ struct drm_driver {
#define DRM_MINOR_UNASSIGNED 0
#define DRM_MINOR_LEGACY 1
#define DRM_MINOR_CONTROL 2
#define DRM_MINOR_RENDER 3
/**
* DRM minor structure. This structure represents a drm minor number.
@ -808,8 +835,15 @@ struct drm_minor {
dev_t device; /**< Device number for mknod */
struct device kdev; /**< Linux device */
struct drm_device *dev;
/* for render nodes */
struct proc_dir_entry *dev_root; /**< proc directory entry */
struct class_device *dev_class;
/* for control nodes - a pointer to the current master for this control node */
struct drm_master *master; /* currently active master for this node */
struct list_head master_list;
struct drm_mode_group mode_group;
};
@ -818,13 +852,9 @@ struct drm_minor {
* may contain multiple heads.
*/
struct drm_device {
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
int if_version; /**< Highest interface version set */
int blocked; /**< Blocked due to VC switch? */
/** \name Locks */
/*@{ */
spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
@ -847,12 +877,6 @@ struct drm_device {
atomic_t counts[15];
/*@} */
/** \name Authentication */
/*@{ */
struct list_head filelist;
struct drm_open_hash magiclist;
struct list_head magicfree;
/*@} */
/** \name Memory management */
/*@{ */
@ -860,7 +884,6 @@ struct drm_device {
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
struct drm_mm offset_manager; /**< User token manager */
struct drm_open_hash object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@ -873,7 +896,9 @@ struct drm_device {
struct idr ctx_idr;
struct list_head vmalist; /**< List of vmas (for debugging) */
struct drm_lock_data lock; /**< Information on hardware lock */
struct list_head filelist;
/*@} */
/** \name DMA queues (contexts) */
@ -885,6 +910,7 @@ struct drm_device {
struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
/** \name Context support */
/*@{ */
int irq; /**< Interrupt used by board */
@ -901,6 +927,15 @@ struct drm_device {
struct work_struct work;
/** \name HOTPLUG IRQ support */
/*@{ */
wait_queue_head_t hotplug_queue; /**< HOTPLUG wait queue */
spinlock_t hotplug_lock;
struct list_head *hotplug_sigs; /**< signal list to send on HOTPLUG */
atomic_t hotplug_signal_pending; /* number of signals pending on all crtcs*/
/*@} */
/** \name VBLANK IRQ support */
/*@{ */
@ -954,6 +989,9 @@ struct drm_device {
struct drm_driver *driver;
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
/* minor number for control node */
struct drm_minor *control;
struct drm_minor *primary; /**< render type primary screen head */
struct drm_fence_manager fm;
@ -965,6 +1003,9 @@ struct drm_device {
struct idr drw_idr;
/*@} */
/* DRM mode setting */
struct drm_mode_config mode_config;
/** \name GEM information */
/*@{ */
spinlock_t object_name_lock;
@ -989,7 +1030,18 @@ struct drm_agp_ttm_backend {
int populated;
};
#endif
struct ati_pcigart_ttm_backend {
struct drm_ttm_backend backend;
int populated;
void (*gart_flush_fn)(struct drm_device *dev);
struct drm_ati_pcigart_info *gart_info;
unsigned long offset;
struct page **pages;
int num_pages;
int bound;
struct drm_device *dev;
};
extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@ -1238,12 +1290,16 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);
extern int drm_hotplug_init(struct drm_device *dev);
extern int drm_wait_hotplug(struct drm_device *dev, void *data, struct drm_file *filp);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
extern int drm_wait_hotplug(struct drm_device *dev, void *data, struct drm_file *filp);
extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern void drm_handle_hotplug(struct drm_device *dev);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
@ -1288,10 +1344,17 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor_p);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern struct class *drm_class;
@ -1317,6 +1380,8 @@ extern int drm_sg_free(struct drm_device *dev, void *data,
/* ATI PCIGART support (ati_pcigart.h) */
extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
extern int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info);
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
@ -1328,7 +1393,11 @@ struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
extern int drm_sysfs_device_add(struct drm_minor *minor);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
extern void drm_sysfs_device_remove(struct drm_minor *minor);
extern char *drm_get_connector_status_name(enum drm_connector_status status);
extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);
/*
* Basic memory manager support (drm_mm.c)


@ -45,14 +45,15 @@
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
struct drm_file *retval = NULL;
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
mutex_lock(&dev->struct_mutex);
if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic
* associated the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
static int drm_add_magic(struct drm_master *master, struct drm_file * priv,
drm_magic_t magic)
{
struct drm_magic_entry *entry;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
@ -85,8 +86,8 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &dev->magicfree);
drm_ht_insert_item(&master->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &master->magicfree);
mutex_unlock(&dev->struct_mutex);
return 0;
@ -101,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
mutex_lock(&dev->struct_mutex);
if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
drm_ht_remove_item(&dev->magiclist, hash);
drm_ht_remove_item(&master->magiclist, hash);
list_del(&pt->head);
mutex_unlock(&dev->struct_mutex);
@ -152,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
++sequence; /* reserve 0 */
auth->magic = sequence++;
spin_unlock(&lock);
} while (drm_find_file(dev, auth->magic));
} while (drm_find_file(file_priv->master, auth->magic));
file_priv->magic = auth->magic;
drm_add_magic(dev, file_priv, auth->magic);
drm_add_magic(file_priv->master, file_priv, auth->magic);
}
DRM_DEBUG("%u\n", auth->magic);
@ -180,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file;
DRM_DEBUG("%u\n", auth->magic);
if ((file = drm_find_file(dev, auth->magic))) {
if ((file = drm_find_file(file_priv->master, auth->magic))) {
file->authenticated = 1;
drm_remove_magic(dev, auth->magic);
drm_remove_magic(file_priv->master, auth->magic);
return 0;
}
return -EINVAL;
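For context, a minimal user-space sketch (not part of this diff) of the handshake these per-master magic lists serve, using the existing libdrm entry points drmGetMagic() and drmAuthMagic(); how the cookie travels from client to master is up to the two processes:

#include <xf86drm.h>

static int example_authenticate(int client_fd, int master_fd)
{
	drm_magic_t magic;

	if (drmGetMagic(client_fd, &magic))	/* DRM_IOCTL_GET_MAGIC */
		return -1;
	/* ... pass 'magic' from the client to the master ... */
	return drmAuthMagic(master_fd, magic);	/* DRM_IOCTL_AUTH_MAGIC */
}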

File diff suppressed because it is too large


@ -1,189 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
/*
* This file implements a simple replacement for the buffer manager use
* of the heavyweight hardware lock.
* The lock is a read-write lock. Taking it in read mode is fast, and
* intended for in-kernel use only.
* Taking it in write mode is slow.
*
* The write mode is used only when there is a need to block all
* user-space processes from allocating a
* new memory area.
* Typical use in write mode is X server VT switching, and it's allowed
* to leave kernel space with the write lock held. If a user-space process
* dies while having the write-lock, it will be released during the file
* descriptor release.
*
* The read lock is typically placed at the start of an IOCTL- or
* user-space callable function that may end up allocating a memory area.
* This includes setstatus, super-ioctls and no_pfn; the latter may move
* unmappable regions to mappable. It's a bug to leave kernel space with the
* read lock held.
*
* Both read- and write lock taking may be interruptible for low signal-delivery
* latency. The locking functions will return -EAGAIN if interrupted by a
* signal.
*
* Locking order: The lock should be taken BEFORE any kernel mutexes
* or spinlocks.
*/
#include "drmP.h"
void drm_bo_init_lock(struct drm_bo_lock *lock)
{
DRM_INIT_WAITQUEUE(&lock->queue);
atomic_set(&lock->write_lock_pending, 0);
atomic_set(&lock->readers, 0);
}
void drm_bo_read_unlock(struct drm_bo_lock *lock)
{
if (atomic_dec_and_test(&lock->readers))
wake_up_all(&lock->queue);
}
EXPORT_SYMBOL(drm_bo_read_unlock);
int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
{
while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
int ret;
if (!interruptible) {
wait_event(lock->queue,
atomic_read(&lock->write_lock_pending) == 0);
continue;
}
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->write_lock_pending) == 0);
if (ret)
return -EAGAIN;
}
while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
int ret;
if (!interruptible) {
wait_event(lock->queue,
atomic_read(&lock->readers) != -1);
continue;
}
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->readers) != -1);
if (ret)
return -EAGAIN;
}
return 0;
}
EXPORT_SYMBOL(drm_bo_read_lock);
static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
{
if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
return -EINVAL;
wake_up_all(&lock->queue);
return 0;
}
static void drm_bo_write_lock_remove(struct drm_file *file_priv,
struct drm_user_object *item)
{
struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
int ret;
ret = __drm_bo_write_unlock(lock);
BUG_ON(ret);
}
int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
struct drm_file *file_priv)
{
int ret = 0;
struct drm_device *dev;
atomic_inc(&lock->write_lock_pending);
while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
if (!interruptible) {
wait_event(lock->queue,
atomic_read(&lock->readers) == 0);
continue;
}
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->readers) == 0);
if (ret) {
atomic_dec(&lock->write_lock_pending);
wake_up_all(&lock->queue);
return -EAGAIN;
}
}
/*
* Add a dummy user-object, the destructor of which will
* make sure the lock is released if the client dies
* while holding it.
*/
if (atomic_dec_and_test(&lock->write_lock_pending))
wake_up_all(&lock->queue);
dev = file_priv->minor->dev;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(file_priv, &lock->base, 0);
lock->base.remove = &drm_bo_write_lock_remove;
lock->base.type = drm_lock_type;
if (ret)
(void)__drm_bo_write_unlock(lock);
mutex_unlock(&dev->struct_mutex);
return ret;
}
int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_ref_object *ro;
mutex_lock(&dev->struct_mutex);
if (lock->base.owner != file_priv) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
BUG_ON(!ro);
drm_remove_ref_object(file_priv, ro);
lock->base.owner = NULL;
mutex_unlock(&dev->struct_mutex);
return 0;
}
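For reference, a rough sketch of the usage pattern the removed comment above describes: an ioctl path takes the read lock before it may allocate memory areas and drops it before returning to user space. The bm_lock field name is an assumption about the old buffer manager layout:

static int example_bo_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	/* interruptible read lock; returns -EAGAIN if a signal arrives */
	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
	if (ret)
		return ret;

	/* ... validate or allocate buffer object memory here ... */

	drm_bo_read_unlock(&dev->bm.bm_lock);
	return 0;
}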


@ -146,6 +146,7 @@ void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
iounmap(virtual);
}
EXPORT_SYMBOL(drm_mem_reg_iounmap);
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
@ -271,6 +272,77 @@ out:
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
static int drm_memset_io_page(void *dst, unsigned long page)
{
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
memset_io(dst, 0, PAGE_SIZE);
return 0;
}
static int drm_memset_ttm_page(struct drm_ttm *ttm, unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
dst = kmap(d);
if (!dst)
return -ENOMEM;
memset_io(dst, 0, PAGE_SIZE);
kunmap(d);
return 0;
}
int drm_bo_move_zero(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
struct drm_ttm *ttm = bo->ttm;
void *new_iomap;
int ret;
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
uint64_t save_proposed_flags = old_mem->proposed_flags;
unsigned long i;
unsigned long page;
ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
if (ret)
goto out;
if (new_iomap == NULL && ttm == NULL)
goto out2;
for (i = 0; i < new_mem->num_pages; ++i) {
if (new_iomap == NULL)
ret = drm_memset_ttm_page(ttm, i);
else
ret = drm_memset_io_page(new_iomap, i);
if (ret)
goto out1;
}
mb();
out2:
drm_bo_free_old_node(bo);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
drm_ttm_destroy(ttm);
bo->ttm = NULL;
}
out1:
drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
return ret;
}
EXPORT_SYMBOL(drm_bo_move_zero);
/*
* Transfer a buffer object's memory and LRU status to a newly
* created object. User-space references remains with the old
@ -561,6 +633,10 @@ int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
if (ret)
return ret;
/* clear the clean flags */
bo->mem.flags &= ~DRM_BO_FLAG_CLEAN;
bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN;
if (bus_size == 0) {
return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {


@ -52,9 +52,9 @@ struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
((entry->map->offset == map->offset) ||
((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
return entry;
}
}
@ -209,12 +209,12 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
if (dev->lock.hw_lock != NULL) {
if (dev->primary->master->lock.hw_lock != NULL) {
vfree(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EBUSY;
}
dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
@ -318,6 +318,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
list->master = dev->primary->master;
*maplist = list;
return 0;
}
@ -344,7 +345,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_map_list *maplist;
int err;
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
@ -379,10 +380,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
struct drm_master *master;
/* Find the list entry for the map and remove it */
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
master = r_list->master;
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
@ -412,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
break;
case _DRM_SHM:
vfree(map->handle);
if (master) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL; /* SHM removed */
master->lock.file_priv = NULL;
wake_up_interruptible(&master->lock.lock_queue);
}
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
@ -1518,6 +1528,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
DRM_DEBUG("dma buf count %d, req count %d\n", request->count, dma->buf_count);
if (request->count >= dma->buf_count) {
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
@ -1528,6 +1539,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
unsigned long token = dev->agp_buffer_token;
if (!map) {
DRM_DEBUG("No map\n");
retcode = -EINVAL;
goto done;
}
@ -1545,6 +1557,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
up_write(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
DRM_DEBUG("mmap failed\n");
/* Real error */
retcode = (signed long)virtual;
goto done;


@ -217,7 +217,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
mutex_lock(&bo->mutex);
err = drm_bo_wait(bo, 0, 1, 0);
err = drm_bo_wait(bo, 0, 1, 0, 1);
if (err) {
data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
@ -730,20 +730,68 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
EXPORT_SYMBOL(idr_replace);
#endif
#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
#define drm_kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static __inline__ unsigned long __round_jiffies(unsigned long j, int cpu)
{
int rem;
unsigned long original = j;
j += cpu * 3;
rem = j % HZ;
if (rem < HZ/4) /* round down */
j = j - rem;
else /* round up */
j = j - rem + HZ;
/* now that we have rounded, subtract the extra skew again */
j -= cpu * 3;
if (j <= jiffies) /* rounding ate our timeout entirely; */
return original;
return j;
}
static __inline__ unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
return __round_jiffies(j + jiffies, cpu) - jiffies;
}
unsigned long round_jiffies_relative(unsigned long j)
{
return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL(round_jiffies_relative);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
struct pci_dev *dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
if (pci_domain_nr(dev->bus) == 0 &&
(dev->bus->number == bus && dev->devfn == devfn))
return dev;
}
return NULL;
}
EXPORT_SYMBOL(pci_get_bus_and_slot);
#endif
#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
pgprot_t protection)
{
enum fixed_addresses idx;
unsigned long vaddr;
static pte_t *km_pte;
int level;
static int initialized = 0;
if (unlikely(!initialized)) {
km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
km_pte = lookup_address(__fix_to_virt(FIX_KMAP_BEGIN), &level);
initialized = 1;
}
@ -756,7 +804,6 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
}
EXPORT_SYMBOL(kmap_atomic_prot_pfn);
#endif
#ifdef DRM_FULL_MM_COMPAT


@ -60,6 +60,13 @@
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
#undef DRM_IRQ_ARGS
#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs
typedef _Bool bool;
enum {
false = 0,
true = 1
};
#endif
#ifndef list_for_each_safe
@ -152,7 +159,7 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \
if (tmp) memset(tmp, 0, size); \
if (tmp) memset(tmp, 0, _size); \
(tmp);})
#endif
@ -328,7 +335,24 @@ void *idr_replace(struct idr *idp, void *ptr, int id);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
typedef _Bool bool;
extern unsigned long round_jiffies_relative(unsigned long j);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static inline int kobject_uevent_env(struct kobject *kobj,
enum kobject_action action,
char *envp[])
{
return 0;
}
#endif
#ifndef PM_EVENT_PRETHAW
#define PM_EVENT_PRETHAW 3
#endif
@ -380,6 +404,14 @@ extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address, int *type);
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
#define drm_on_each_cpu(handler, data, wait) \
on_each_cpu(handler, data, wait)
#else
#define drm_on_each_cpu(handler, data, wait) \
on_each_cpu(handler, data, wait, 1)
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
#define drm_core_ioremap_wc drm_core_ioremap
#endif


@ -257,12 +257,13 @@ static int drm_context_switch(struct drm_device *dev, int old, int new)
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
static int drm_context_switch_complete(struct drm_device *dev, int new)
static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
@ -421,7 +422,7 @@ int drm_newctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
@ -443,9 +444,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
file_priv->remove_auth_on_close = 1;
}
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);

2383
linux-core/drm_crtc.c Normal file

File diff suppressed because it is too large

716
linux-core/drm_crtc.h Normal file

@ -0,0 +1,716 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*/
#ifndef __DRM_CRTC_H__
#define __DRM_CRTC_H__
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/fb.h>
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
#define DRM_MODE_OBJECT_MODE 0xdededede
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
struct drm_mode_object {
uint32_t id;
uint32_t type;
};
/*
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
* DVI, etc. And 'screen' refers to the whole of the visible display, which
* may span multiple monitors (and therefore multiple CRTC and connector
* structures).
*/
enum drm_mode_status {
MODE_OK = 0, /* Mode OK */
MODE_HSYNC, /* hsync out of range */
MODE_VSYNC, /* vsync out of range */
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
MODE_V_ILLEGAL, /* mode has illegal vertical timings */
MODE_BAD_WIDTH, /* requires an unsupported linepitch */
MODE_NOMODE, /* no mode with a matching name */
MODE_NO_INTERLACE, /* interlaced mode not supported */
MODE_NO_DBLESCAN, /* doublescan mode not supported */
MODE_NO_VSCAN, /* multiscan mode not supported */
MODE_MEM, /* insufficient video memory */
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
MODE_MEM_VIRT, /* insufficient video memory given virtual size */
MODE_NOCLOCK, /* no fixed clock available */
MODE_CLOCK_HIGH, /* clock required is too high */
MODE_CLOCK_LOW, /* clock required is too low */
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
MODE_BAD_HVALUE, /* horizontal timing was out of range */
MODE_BAD_VVALUE, /* vertical timing was out of range */
MODE_BAD_VSCAN, /* VScan value out of range */
MODE_HSYNC_NARROW, /* horizontal sync too narrow */
MODE_HSYNC_WIDE, /* horizontal sync too wide */
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
MODE_HBLANK_WIDE, /* horizontal blanking too wide */
MODE_VSYNC_NARROW, /* vertical sync too narrow */
MODE_VSYNC_WIDE, /* vertical sync too wide */
MODE_VBLANK_NARROW, /* vertical blanking too narrow */
MODE_VBLANK_WIDE, /* vertical blanking too wide */
MODE_PANEL, /* exceeds panel dimensions */
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
MODE_ONE_WIDTH, /* only one width is supported */
MODE_ONE_HEIGHT, /* only one height is supported */
MODE_ONE_SIZE, /* only one resolution is supported */
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
MODE_UNVERIFIED = -3, /* mode needs to be reverified */
MODE_BAD = -2, /* unspecified reason */
MODE_ERROR = -1 /* error condition */
};
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
DRM_MODE_TYPE_CRTC_C)
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
.name = nm, .status = 0, .type = (t), .clock = (c), \
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), .vrefresh = 0
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
struct drm_display_mode {
/* Header */
struct list_head head;
struct drm_mode_object base;
char name[DRM_DISPLAY_MODE_LEN];
int connector_count;
enum drm_mode_status status;
int type;
/* Proposed mode values */
int clock;
int hdisplay;
int hsync_start;
int hsync_end;
int htotal;
int hskew;
int vdisplay;
int vsync_start;
int vsync_end;
int vtotal;
int vscan;
unsigned int flags;
/* Addressable image size (may be 0 for projectors, etc.) */
int width_mm;
int height_mm;
/* Actual mode we give to hw */
int clock_index;
int synth_clock;
int crtc_hdisplay;
int crtc_hblank_start;
int crtc_hblank_end;
int crtc_hsync_start;
int crtc_hsync_end;
int crtc_htotal;
int crtc_hskew;
int crtc_vdisplay;
int crtc_vblank_start;
int crtc_vblank_end;
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
int crtc_hadjusted;
int crtc_vadjusted;
/* Driver private mode info */
int private_size;
int *private;
int private_flags;
int vrefresh;
float hsync;
};
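/*
 * Example only, not part of this header: a standard 640x480@60 mode line
 * written with the DRM_MODE() initializer defined above; the type and
 * flag constants come from drm_mode.h.
 */
static struct drm_display_mode example_vga_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
		 640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
};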
enum drm_connector_status {
connector_status_connected = 1,
connector_status_disconnected = 2,
connector_status_unknown = 3,
};
enum subpixel_order {
SubPixelUnknown = 0,
SubPixelHorizontalRGB,
SubPixelHorizontalBGR,
SubPixelVerticalRGB,
SubPixelVerticalBGR,
SubPixelNone,
};
/*
* Describes a given display (e.g. CRT or flat panel) and its limitations.
*/
struct drm_display_info {
char name[DRM_DISPLAY_INFO_LEN];
/* Input info */
bool serration_vsync;
bool sync_on_green;
bool composite_sync;
bool separate_syncs;
bool blank_to_black;
unsigned char video_level;
bool digital;
/* Physical size */
unsigned int width_mm;
unsigned int height_mm;
/* Display parameters */
unsigned char gamma; /* FIXME: storage format */
bool gtf_supported;
bool standard_color;
enum {
monochrome = 0,
rgb,
other,
unknown,
} display_type;
bool active_off_supported;
bool suspend_supported;
bool standby_supported;
/* Color info FIXME: storage format */
unsigned short redx, redy;
unsigned short greenx, greeny;
unsigned short bluex, bluey;
unsigned short whitex, whitey;
/* Clock limits FIXME: storage format */
unsigned int min_vfreq, max_vfreq;
unsigned int min_hfreq, max_hfreq;
unsigned int pixel_clock;
/* White point indices FIXME: storage format */
unsigned int wpx1, wpy1;
unsigned int wpgamma1;
unsigned int wpx2, wpy2;
unsigned int wpgamma2;
enum subpixel_order subpixel_order;
char *raw_edid; /* if any */
};
struct drm_framebuffer_funcs {
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
};
struct drm_framebuffer {
struct drm_device *dev;
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
unsigned int pitch;
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
unsigned int depth;
int bits_per_pixel;
int flags;
void *fbdev;
u32 pseudo_palette[17];
struct list_head filp_head;
};
struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
unsigned int length;
void *data;
};
struct drm_property_enum {
uint64_t value;
struct list_head head;
char name[DRM_PROP_NAME_LEN];
};
struct drm_property {
struct list_head head;
struct drm_mode_object base;
uint32_t flags;
char name[DRM_PROP_NAME_LEN];
uint32_t num_values;
uint64_t *values;
struct list_head enum_blob_list;
};
struct drm_crtc;
struct drm_connector;
struct drm_encoder;
/**
* drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @cursor_set: set the cursor image for this CRTC
* @cursor_move: move the cursor on this CRTC
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object
* @set_config: apply a new mode, framebuffer and connector set to this CRTC
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
* CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
* connectors, not just CRTs).
*
* Each driver is responsible for filling out this structure at startup time,
* in addition to providing other modesetting features, like i2c and DDC
* bus accessors.
*/
struct drm_crtc_funcs {
/* Save CRTC state */
void (*save)(struct drm_crtc *crtc); /* suspend? */
/* Restore CRTC state */
void (*restore)(struct drm_crtc *crtc); /* resume? */
/* cursor controls */
int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
/* Set gamma on the CRTC */
void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size);
/* Object destroy routine */
void (*destroy)(struct drm_crtc *crtc);
int (*set_config)(struct drm_mode_set *set);
};
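/*
 * Sketch only, not part of this header: the rough shape of a driver's
 * funcs table. The example_* callbacks are placeholders; real drivers
 * usually forward to drm_crtc_helper_set_config() and drm_crtc_cleanup()
 * from these hooks.
 */
static int example_crtc_set_config(struct drm_mode_set *set)
{
	/* a driver typically forwards to drm_crtc_helper_set_config() */
	return 0;
}

static void example_crtc_destroy(struct drm_crtc *crtc)
{
	/* call drm_crtc_cleanup() and free the driver's CRTC wrapper */
}

static const struct drm_crtc_funcs example_crtc_funcs = {
	.set_config = example_crtc_set_config,
	.destroy = example_crtc_destroy,
};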
/**
* drm_crtc - central CRTC control structure
* @enabled: is this CRTC enabled?
* @x: x position on screen
* @y: y position on screen
* @desired_mode: new desired mode
* @desired_x: desired x for desired_mode
* @desired_y: desired y for desired_mode
* @funcs: CRTC control functions
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
*/
struct drm_crtc {
struct drm_device *dev;
struct list_head head;
struct drm_mode_object base;
/* framebuffer the connector is currently bound to */
struct drm_framebuffer *fb;
bool enabled;
struct drm_display_mode mode;
int x, y;
struct drm_display_mode *desired_mode;
int desired_x, desired_y;
const struct drm_crtc_funcs *funcs;
/* CRTC gamma size for reporting to userspace */
uint32_t gamma_size;
uint16_t *gamma_store;
/* if you are using the helper */
void *helper_private;
};
/**
* drm_connector_funcs - control connectors on a given device
* @dpms: set connector power state
* @save: save connector state
* @restore: restore connector state
* @detect: is this connector active?
* @fill_modes: fill the connector's mode list, within the given size limits
* @set_property: property for this connector may need update
* @destroy: make object go away
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
* etc.
*/
struct drm_connector_funcs {
void (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
enum drm_connector_status (*detect)(struct drm_connector *connector);
void (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
int (*set_property)(struct drm_connector *connector, struct drm_property *property,
uint64_t val);
void (*destroy)(struct drm_connector *connector);
};
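/*
 * Sketch only, not part of this header: the shape of a detect() hook.
 * The actual probe (DDC, load detection, a hotplug pin) is driver
 * specific and stubbed out here.
 */
static enum drm_connector_status
example_connector_detect(struct drm_connector *connector)
{
	bool present = false;

	/* ... probe the hardware behind 'connector' here ... */

	return present ? connector_status_connected
		       : connector_status_disconnected;
}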
struct drm_encoder_funcs {
void (*destroy)(struct drm_encoder *encoder);
};
#define DRM_CONNECTOR_MAX_UMODES 16
#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 2
/**
* drm_encoder - central DRM encoder structure
*/
struct drm_encoder {
struct drm_device *dev;
struct list_head head;
struct drm_mode_object base;
int encoder_type;
uint32_t possible_crtcs;
uint32_t possible_clones;
struct drm_crtc *crtc;
const struct drm_encoder_funcs *funcs;
void *helper_private;
};
/**
* drm_connector - central DRM connector control structure
* @crtc: CRTC this connector is currently connected to, NULL if none
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @available_modes: modes available on this connector (from get_modes() + user)
* @initial_x: initial x position for this connector
* @initial_y: initial y position for this connector
* @status: connector connected?
* @funcs: connector control functions
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
* position in the broader display (referred to as a 'screen' though it could
* span multiple monitors).
*/
struct drm_connector {
struct drm_device *dev;
struct device kdev;
struct device_attribute *attr;
struct list_head head;
struct drm_mode_object base;
int connector_type;
int connector_type_id;
bool interlace_allowed;
bool doublescan_allowed;
struct list_head modes; /* list of modes on this connector */
int initial_x, initial_y;
enum drm_connector_status status;
/* these are modes added by probing with DDC or the BIOS */
struct list_head probed_modes;
struct drm_display_info display_info;
const struct drm_connector_funcs *funcs;
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
void *helper_private;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
};
/**
* struct drm_mode_set
*
* Represents a single CRTC, the connectors it drives, the mode to use and
* the framebuffer it scans out from.
*
* This is used to set modes.
*/
struct drm_mode_set {
struct list_head head;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
uint32_t x;
uint32_t y;
struct drm_connector **connectors;
size_t num_connectors;
};
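/*
 * Illustrative sketch (editorial addition, not part of this source tree):
 * filling in a drm_mode_set and handing it to the helper layer.  The crtc,
 * connector, fb and mode pointers are assumed to have been looked up already;
 * drm_crtc_helper_set_config() is declared in drm_crtc_helper.h.
 */
static int foo_set_single_output(struct drm_crtc *crtc,
				 struct drm_connector *connector,
				 struct drm_framebuffer *fb,
				 struct drm_display_mode *mode)
{
	struct drm_mode_set set = {
		.crtc = crtc,
		.fb = fb,
		.mode = mode,
		.x = 0,
		.y = 0,
		.connectors = &connector,	/* one connector on this CRTC */
		.num_connectors = 1,
	};

	return drm_crtc_helper_set_config(&set);
}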
/**
 * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
 * @resize_fb: adjust CRTCs as necessary for a proposed framebuffer resize
 * @fb_create: create a new framebuffer object from a userspace fb command
 * @fb_changed: notify the driver that the in-kernel framebuffer has changed
 *
 * The core calls back into the driver through these hooks, e.g. when
 * userspace supplies a new framebuffer or the screen layout changes.  If the
 * driver can't support a proposed change it can return an error; otherwise it
 * should adjust the CRTC<->connector mappings as needed and update its view
 * of the screen.
 */
struct drm_mode_config_funcs {
int (*resize_fb)(struct drm_device *dev, struct drm_file *file_priv, struct drm_framebuffer *fb, struct drm_mode_fb_cmd *mode_cmd);
struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
int (*fb_changed)(struct drm_device *dev);
};
struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
/* list of object IDs for this group */
uint32_t *id_list;
};
/**
* drm_mode_config - Mode configuration control structure
*
*/
struct drm_mode_config {
struct mutex mutex; /* protects configuration and IDR */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
int num_fb;
struct list_head fb_list;
int num_connector;
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
int num_crtc;
struct list_head crtc_list;
struct list_head property_list;
/* in-kernel framebuffers - hung off filp_head in drm_framebuffer */
struct list_head fb_kernel_list;
int min_width, min_height;
int max_width, max_height;
struct drm_mode_config_funcs *funcs;
unsigned long fb_base;
/* pointers to standard properties */
struct list_head property_blob_list;
struct drm_property *edid_property;
struct drm_property *dpms_property;
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
struct drm_property *dvi_i_select_subconnector_property;
/* TV properties */
struct drm_property *tv_subconnector_property;
struct drm_property *tv_select_subconnector_property;
struct drm_property *tv_mode_property;
struct drm_property *tv_left_margin_property;
struct drm_property *tv_right_margin_property;
struct drm_property *tv_top_margin_property;
struct drm_property *tv_bottom_margin_property;
/* Optional properties */
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
/* hotplug */
uint32_t hotplug_counter;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
#define obj_to_property(x) container_of(x, struct drm_property, base)
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
extern void drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern void drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
extern void drm_connector_cleanup(struct drm_connector *connector);
extern void drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type);
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
extern char *drm_get_connector_name(struct drm_connector *connector);
extern char *drm_get_dpms_name(int val);
extern char *drm_get_select_subconnector_name(int val);
extern char *drm_get_subconnector_name(int val);
extern void drm_fb_release(struct file *filp);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
extern int drm_mode_width(struct drm_display_mode *mode);
extern int drm_mode_height(struct drm_display_mode *mode);
/* for use by the fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_display_mode *mode);
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
extern void drm_mode_list_concat(struct list_head *head,
struct list_head *new);
extern void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch);
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
extern int drm_mode_vrefresh(struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
extern void drm_mode_connector_list_update(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid);
extern int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
extern struct drm_framebuffer *drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
extern int drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_dithering_property(struct drm_device *dev);
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_setcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_replacefb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getencoder(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
#endif /* __DRM_CRTC_H__ */

View File

@ -0,0 +1,833 @@
/* (c) 2006-2007 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
/*
* Detailed mode info for a standard 640x480@60Hz monitor
*/
static struct drm_display_mode std_mode[] = {
{ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 25200, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
};
/**
 * drm_helper_probe_single_connector_modes - get complete set of display modes
 * @connector: connector to probe
 * @maxX: max width for modes
 * @maxY: max height for modes
 *
 * LOCKING:
 * Caller must hold mode config lock.
 *
 * Detect @connector and try to find modes on it.  Modes will first be added
 * to the connector's probed_modes list, then culled (based on validity and
 * the @maxX, @maxY parameters) and put into the normal modes list.
 *
 * Intended to be used either at bootup time or when major configuration
 * changes have occurred.
 *
 * FIXME: take into account monitor limits
 */
void drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
int ret;
DRM_DEBUG("%s\n", drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
connector->status = (*connector->funcs->detect)(connector);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG("%s is disconnected\n", drm_get_connector_name(connector));
/* TODO set EDID to NULL */
return;
}
ret = (*connector_funcs->get_modes)(connector);
if (ret) {
drm_mode_connector_list_update(connector);
}
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX,
maxY, 0);
list_for_each_entry_safe(mode, t, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = (*connector_funcs->mode_valid)(connector,mode);
}
drm_mode_prune_invalid(dev, &connector->modes, true);
if (list_empty(&connector->modes)) {
struct drm_display_mode *stdmode;
DRM_DEBUG("No valid modes on %s\n", drm_get_connector_name(connector));
/* Should we do this here?
 * When no valid EDID modes are available we used to bail out
 * here; now we add a standard 640x480@60Hz mode and carry on.
 */
stdmode = drm_mode_duplicate(dev, &std_mode[0]);
drm_mode_probed_add(connector, stdmode);
drm_mode_list_concat(&connector->probed_modes,
&connector->modes);
DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
drm_get_connector_name(connector));
}
drm_mode_sort(&connector->modes);
DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
}
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, uint32_t maxY)
{
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_probe_single_connector_modes(connector, maxX, maxY);
}
}
EXPORT_SYMBOL(drm_helper_probe_connector_modes);
/**
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
* LOCKING:
* Caller must hold mode config lock.
*
* Walk @crtc's DRM device's mode_config and see if it's in use.
*
* RETURNS:
* True if @crtc is part of the mode_config, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
/* FIXME: Locking around list access? */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc)
return true;
return false;
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
/**
 * drm_helper_disable_unused_functions - disable unused objects
 * @dev: DRM device
 *
 * LOCKING:
 * Caller must hold mode config lock.
 *
 * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
 * by calling its dpms function, which should power it off.
 */
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder_funcs = encoder->helper_private;
if (!encoder->crtc)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
}
}
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_width(mode) > width ||
drm_mode_height(mode) > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
if (strict) {
enable = connector->status == connector_status_connected;
} else {
enable = connector->status != connector_status_disconnected;
}
return enable;
}
static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
{
bool any_enabled = false;
struct drm_connector *connector;
int i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
any_enabled |= enabled[i] = drm_connector_enabled(connector, true);
i++;
}
if (!any_enabled) {
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, false);
i++;
}
}
}
static bool drm_target_preferred(struct drm_device *dev, struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_connector *connector;
struct drm_display_mode *preferred;
int i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (enabled[i] == false) {
i++;
continue;
}
modes[i] = drm_has_preferred_mode(connector, width, height);
if (!modes[i]) {
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
i++;
}
return true;
}
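/*
 * Editorial note on drm_pick_crtcs() below: it is an exhaustive recursive
 * search over connector -> CRTC assignments.  For connector n it first
 * recurses with the connector left unassigned, then tries every CRTC the
 * chosen encoder can drive (skipping CRTCs already claimed by connectors
 * 0..n-1), scoring each candidate (one point for having an encoder, plus one
 * if the connector is connected and one if it has a preferred mode that
 * fits) and recursing for connector n+1.  The highest scoring assignment is
 * copied back into best_crtcs.
 */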
static int drm_pick_crtcs(struct drm_device *dev,
struct drm_crtc **best_crtcs,
struct drm_display_mode **modes,
int n, int width, int height)
{
int c, o;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *best_crtc;
int my_score, best_score, score;
struct drm_crtc **crtcs, *crtc;
if (n == dev->mode_config.num_connector)
return 0;
c = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (c == n)
break;
c++;
}
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
crtcs = kmalloc(dev->mode_config.num_connector * sizeof(struct drm_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (drm_has_preferred_mode(connector, width, height))
my_score++;
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if (!encoder)
goto out;
connector->encoder = encoder;
/* select a crtc for this connector and then attempt to configure
remaining connectors */
c = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if ((connector->encoder->possible_crtcs & (1 << c)) == 0) {
c++;
continue;
}
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
break;
if (o < n) {
/* ignore cloning for now */
c++;
continue;
}
crtcs[n] = crtc;
memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1, width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs, dev->mode_config.num_connector * sizeof(struct drm_crtc *));
}
c++;
}
out:
kfree(crtcs);
return best_score;
}
static void drm_setup_crtcs(struct drm_device *dev)
{
struct drm_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_connector *connector;
bool *enabled;
int width, height;
int i, ret;
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
}
crtcs = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), GFP_KERNEL);
drm_enable_connectors(dev, enabled);
ret = drm_target_preferred(dev, modes, enabled, width, height);
if (!ret)
DRM_ERROR("Unable to find initial modes\n");
drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
if (connector->encoder == NULL) {
i++;
continue;
}
if (mode && crtc) {
crtc->desired_mode = mode;
connector->encoder->crtc = crtc;
} else
connector->encoder->crtc = NULL;
i++;
}
kfree(crtcs);
kfree(modes);
kfree(enabled);
}
/**
 * drm_crtc_helper_set_mode - set a mode on a CRTC
 * @crtc: CRTC to program
 * @mode: mode to use
 * @x: horizontal offset into the surface to scan out from
 * @y: vertical offset into the surface to scan out from
 *
 * LOCKING:
 * Caller must hold mode config lock.
 *
 * Try to set @mode on @crtc.  Give @crtc and its associated connectors a
 * chance to fixup or reject the mode prior to trying to set it.
 *
 * RETURNS:
 * True if the mode was set successfully, or false otherwise.
 */
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
struct drm_encoder *encoder;
bool ret = true;
adjusted_mode = drm_mode_duplicate(dev, mode);
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
/* Update crtc values up front so the driver can rely on them for mode
* setting.
*/
crtc->mode = *mode;
crtc->x = x;
crtc->y = y;
if (drm_mode_equal(&saved_mode, &crtc->mode)) {
if (saved_x != crtc->x || saved_y != crtc->y) {
crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
goto done;
}
}
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) {
goto done;
}
}
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
goto done;
}
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
}
crtc_funcs->prepare(crtc);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder), mode->name, mode->base.id);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
/* Now, enable the clocks, plane, pipe, and connectors that we set up. */
crtc_funcs->commit(crtc);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
}
/* Done with the adjusted copy of the mode */
drm_mode_destroy(dev, adjusted_mode);
/* TODO: set the screen's sub-pixel order once that's wired up:
 * if (scrn->pScreen)
 *	drm_crtc_set_screen_sub_pixel_order(dev);
 */
done:
if (!ret) {
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
}
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
/**
 * drm_crtc_helper_set_config - set a new config from userspace
 * @set: mode set configuration (CRTC, framebuffer, mode, connector list, x/y)
 *
 * LOCKING:
 * Caller must hold mode config lock.
 *
 * Setup a new configuration, provided by the user in @set, and enable it.
 *
 * RETURNS:
 * Zero on success, negative errno on failure.
 */
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_crtc **save_crtcs, *new_crtc;
struct drm_encoder **save_encoders, *new_encoder;
bool save_enabled;
bool changed = false;
bool flip_or_move = false;
struct drm_connector *connector;
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
DRM_DEBUG("\n");
if (!set)
return -EINVAL;
if (!set->crtc)
return -EINVAL;
if (!set->crtc->helper_private)
return -EINVAL;
crtc_funcs = set->crtc->helper_private;
DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %i (x, y) (%i, %i)\n", set->crtc, set->crtc->base.id, set->fb, set->connectors, set->num_connectors, set->x, set->y);
dev = set->crtc->dev;
/* save previous config */
save_enabled = set->crtc->enabled;
/* this is meant to be num_connector not num_crtc */
save_crtcs = kzalloc(dev->mode_config.num_connector * sizeof(struct drm_crtc *), GFP_KERNEL);
if (!save_crtcs)
return -ENOMEM;
save_encoders = kzalloc(dev->mode_config.num_connector * sizeof(struct drm_encoder *), GFP_KERNEL);
if (!save_encoders) {
kfree(save_crtcs);
return -ENOMEM;
}
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
/* if we have no fb then it's a change not a flip */
if (set->crtc->fb == NULL)
changed = true;
else
flip_or_move = true;
}
if (set->x != set->crtc->x || set->y != set->crtc->y)
flip_or_move = true;
if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG("modes are different\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
changed = true;
}
/* a) traverse passed in connector list and get encoders for them */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
save_encoders[count++] = connector->encoder;
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
new_encoder = connector_funcs->best_encoder(connector);
/* if we can't get an encoder for a connector
we are setting now - then fail */
if (new_encoder == NULL)
/* don't break so the failure path works correctly */
fail = 1;
break;
}
}
if (new_encoder != connector->encoder) {
changed = true;
connector->encoder = new_encoder;
}
}
if (fail) {
ret = -EINVAL;
goto fail_no_encoder;
}
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
save_crtcs[count++] = connector->encoder->crtc;
if (connector->encoder->crtc == set->crtc)
new_crtc = NULL;
else
new_crtc = connector->encoder->crtc;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector)
new_crtc = set->crtc;
}
if (new_crtc != connector->encoder->crtc) {
changed = true;
connector->encoder->crtc = new_crtc;
}
}
/* mode_set_base is not a required function */
if (flip_or_move && !crtc_funcs->mode_set_base)
changed = true;
if (changed) {
set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
DRM_DEBUG("attempting to set mode from userspace\n");
drm_mode_debug_printmodeline(set->mode);
if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x,
set->y)) {
ret = -EINVAL;
goto fail_set_mode;
}
/* TODO are these needed? */
set->crtc->desired_x = set->x;
set->crtc->desired_y = set->y;
set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (flip_or_move) {
if (set->crtc->fb != set->fb)
set->crtc->fb = set->fb;
crtc_funcs->mode_set_base(set->crtc, set->x, set->y);
set->crtc->x = set->x;
set->crtc->y = set->y;
}
kfree(save_encoders);
kfree(save_crtcs);
return 0;
fail_set_mode:
set->crtc->enabled = save_enabled;
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
connector->encoder->crtc = save_crtcs[count++];
fail_no_encoder:
kfree(save_crtcs);
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
connector->encoder = save_encoders[count++];
}
kfree(save_encoders);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
bool drm_helper_plugged_event(struct drm_device *dev)
{
DRM_DEBUG("\n");
drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, dev->mode_config.max_height);
drm_setup_crtcs(dev);
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
drm_helper_disable_unused_functions(dev);
drm_sysfs_hotplug_event(dev);
return true;
}
/**
 * drm_helper_initial_config - setup a sane initial connector configuration
 * @dev: DRM device
 * @can_grow: this configuration is growable
 *
 * LOCKING:
 * Called at init time, must take mode config lock.
 *
 * Scan the CRTCs and connectors and try to put together an initial setup.
 * At the moment, this is a cloned configuration across all heads with
 * a new framebuffer object as the backing store.
 *
 * RETURNS:
 * False (success) for now; @can_grow is currently unused.
 */
bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
{
int ret = false;
drm_helper_plugged_event(dev);
return ret;
}
EXPORT_SYMBOL(drm_helper_initial_config);
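/*
 * Illustrative sketch (editorial addition, not part of this source tree):
 * the intended driver load sequence around drm_helper_initial_config().
 * foo_create_outputs() is a hypothetical hook where the driver registers its
 * CRTCs, encoders and connectors; the 4096x4096 limits are just an example.
 */
static int foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	/* driver-specific CRTC/encoder/connector setup */
	foo_create_outputs(dev);

	/* probe connectors and pick a sane initial mode on each CRTC */
	drm_helper_initial_config(dev, false);
	return 0;
}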
/**
 * drm_helper_hotplug_stage_two - handle the second stage of a hotplug event
 * @dev: DRM device
 *
 * LOCKING:
 * Caller must hold mode config lock, function might grab struct lock.
 *
 * Stage two of a hotplug: bump the hotplug counter and re-run the plugged
 * event handling so modes and CRTC routing are refreshed.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int drm_helper_hotplug_stage_two(struct drm_device *dev)
{
dev->mode_config.hotplug_counter++;
drm_helper_plugged_event(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
fb->pitch = mode_cmd->pitch;
fb->bits_per_pixel = mode_cmd->bpp;
fb->depth = mode_cmd->depth;
return 0;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->enabled)
continue;
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
crtc->y);
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
}
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
void drm_helper_set_connector_dpms(struct drm_connector *connector,
int dpms_mode)
{
int i = 0;
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_mode_object *obj;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
obj = drm_mode_object_find(connector->dev,
connector->encoder_ids[i],
DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
encoder = obj_to_encoder(obj);
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, dpms_mode);
}
}
EXPORT_SYMBOL(drm_helper_set_connector_dpms);

View File

@ -0,0 +1,98 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*/
/*
 * The DRM mode setting helper functions are common code for drivers to use if
 * they wish.  Drivers are not forced to use this code in their
 * implementations, but it would be useful if the code they do use at least
 * provides a consistent interface and operation to userspace.
 */
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/fb.h>
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
* unsupported, the provider must use the next lowest power level.
*/
void (*dpms)(struct drm_crtc *crtc, int mode);
void (*prepare)(struct drm_crtc *crtc);
void (*commit)(struct drm_crtc *crtc);
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y);
/* Move the crtc on the current fb to the given position *optional* */
void (*mode_set_base)(struct drm_crtc *crtc, int x, int y);
};
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector);
};
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
};
extern void drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd);
static inline void drm_crtc_helper_add(struct drm_crtc *crtc, const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = (void *)funcs;
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder, const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = (void *)funcs;
}
static inline void drm_connector_helper_add(struct drm_connector *connector, const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = (void *)funcs;
}
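/*
 * Illustrative sketch (editorial addition, not part of this source tree):
 * attaching a helper vtable so the generic drm_crtc_helper_set_mode()/
 * drm_crtc_helper_set_config() paths can drive the hardware.  The foo_crtc_*
 * callbacks are hypothetical.
 */
static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.dpms		= foo_crtc_dpms,
	.prepare	= foo_crtc_prepare,		/* typically turns the pipe off */
	.commit		= foo_crtc_commit,		/* typically turns it back on */
	.mode_fixup	= foo_crtc_mode_fixup,
	.mode_set	= foo_crtc_mode_set,
	.mode_set_base	= foo_crtc_mode_set_base,	/* optional pan/flip hook */
};

static void foo_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
			  const struct drm_crtc_funcs *funcs)
{
	drm_crtc_init(dev, crtc, funcs);
	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
}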
extern int drm_helper_resume_force_mode(struct drm_device *dev);
extern void drm_helper_set_connector_dpms(struct drm_connector *connector,
int dpms_mode);
#endif

View File

@ -58,6 +58,7 @@ int drm_dma_setup(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL(drm_dma_setup);
/**
* Cleanup the DMA resources.
@ -120,6 +121,7 @@ void drm_dma_takedown(struct drm_device *dev)
drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
dev->dma = NULL;
}
EXPORT_SYMBOL(drm_dma_takedown);
/**
* Free a buffer.

View File

@ -56,26 +56,29 @@ static int drm_version(struct drm_device *dev, void *data,
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@ -100,7 +103,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER),
#if __OS_HAS_AGP
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@ -121,35 +124,25 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REPLACEFB, drm_mode_replacefb, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
#if OS_HAS_GEM
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
@ -172,30 +165,24 @@ static struct drm_ioctl_desc drm_ioctls[] = {
*/
int drm_lastclose(struct drm_device * dev)
{
struct drm_magic_entry *pt, *next;
struct drm_map_list *r_list, *list_t;
struct drm_vma_entry *vma, *vma_temp;
int i;
DRM_DEBUG("\n");
/*
* We can't do much about this function failing.
*/
drm_bo_driver_finish(dev);
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET))
drm_bo_driver_finish(dev);
if (dev->irq_enabled)
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
/* Free drawable information memory */
@ -204,24 +191,8 @@ int drm_lastclose(struct drm_device * dev)
drm_drawable_free_all(dev);
del_timer(&dev->timer);
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
if (dev->magicfree.next) {
list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
drm_ht_remove(&dev->magiclist);
}
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
@ -240,7 +211,10 @@ int drm_lastclose(struct drm_device * dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
/* You're supposed to have a real memory manager for modesetting, but this'll suffice as a temporary workaround. */
/* This assumes sgdma is inited at load time. */
if (drm_core_check_feature(dev, DRIVER_SG) && !drm_core_check_feature(dev, DRIVER_MODESET) && dev->sg) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
@ -251,12 +225,13 @@ int drm_lastclose(struct drm_device * dev)
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
/*
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (!(r_list->map->flags & _DRM_DRIVER)) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
}*/
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
@ -275,14 +250,9 @@ int drm_lastclose(struct drm_device * dev)
}
dev->queue_count = 0;
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_dma_takedown(dev);
if (dev->lock.hw_lock) {
dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.file_priv = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
@ -409,12 +379,14 @@ static void drm_cleanup(struct drm_device * dev)
DRM_DEBUG("mtrr_del=%d\n", retval);
}
if (dev->driver->unload)
dev->driver->unload(dev);
drm_ht_remove(&dev->map_hash);
if (drm_core_has_AGP(dev) && dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
if (dev->driver->unload)
dev->driver->unload(dev);
if (!drm_fb_loaded)
pci_disable_device(dev->pdev);
@ -422,9 +394,12 @@ static void drm_cleanup(struct drm_device * dev)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
drm_put_minor(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
drm_put_minor(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
@ -439,8 +414,14 @@ int drm_minors_cleanup(int id, void *ptr, void *data)
if (minor->dev->driver != driver)
return 0;
if (minor->type != DRM_MINOR_LEGACY)
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (minor->type != DRM_MINOR_CONTROL)
return 0;
} else {
if (minor->type != DRM_MINOR_LEGACY)
return 0;
}
if (dev)
pci_dev_put(dev->pdev);
@ -626,6 +607,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retcode = -EINVAL;
goto err_i1;
}
#if 0
/*
* This check is disabled, because driver private ioctl->cmd
@ -647,7 +629,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
_IOC_SIZE(cmd)) != 0) {
retcode = -EACCES;
retcode = -EFAULT;
goto err_i1;
}
}
@ -657,7 +639,8 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
((ioctl->flags & DRM_MASTER) && !file_priv->master) ||
((!(ioctl->flags & DRM_CONTROL_ALLOW)) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ) {
retcode = -EACCES;
} else {
retcode = func(dev, kdata, file_priv);
@ -666,13 +649,13 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
_IOC_SIZE(cmd)) != 0)
retcode = -EACCES;
retcode = -EFAULT;
}
err_i1:
atomic_dec(&dev->ioctl_count);
if (retcode)
DRM_DEBUG("ret = %d\n", retcode);
DRM_ERROR("ret = %x %d\n", nr, retcode);
return retcode;
}
EXPORT_SYMBOL(drm_unlocked_ioctl);

737
linux-core/drm_edid.c Normal file
View File

@ -0,0 +1,737 @@
/*
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
* Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
/*
* TODO:
* - support EDID 1.4
* - port quirks from X server code
*/
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
* them here (note that userspace may work around broken monitors first,
* but fixes should make their way here so that the kernel "just works"
* on as many displays as possible).
*/
/* First detailed mode wrong, use largest 60Hz mode */
#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
/* Reported 135MHz pixel clock is too high, needs adjustment */
#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
/* Prefer the largest mode at 75 Hz */
#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
/* Detail timing is in cm not mm */
#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
/* Detailed timing descriptors have bogus size values, so just take the
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
/* Monitor forgot to set the first detailed is preferred bit. */
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
static struct edid_quirk {
char *vendor;
int product_id;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
/* Philips 107p5 CRT */
{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Proview AY765C */
{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* Samsung SyncMaster 205BW. Note: irony */
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
/* Valid EDID header has these bytes */
static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
/**
* edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity check the EDID block by looking at the header, the version number
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
* valid.
*/
static bool edid_is_valid(struct edid *edid)
{
int i;
u8 csum = 0;
u8 *raw_edid = (u8 *)edid;
if (memcmp(edid->header, edid_header, sizeof(edid_header)))
goto bad;
if (edid->version != 1) {
DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
goto bad;
}
if (edid->revision <= 0 || edid->revision > 3) {
DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
goto bad;
}
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
goto bad;
}
return 1;
bad:
if (raw_edid) {
DRM_ERROR("Raw EDID:\n");
print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
printk("\n");
}
return 0;
}
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
* @vendor: vendor string
*
* Returns true if @vendor is in @edid, false otherwise
*/
static bool edid_vendor(struct edid *edid, char *vendor)
{
char edid_vendor[3];
edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
edid_vendor[2] = (edid->mfg_id[2] & 0x1f) + '@';
return !strncmp(edid_vendor, vendor, 3);
}
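/*
 * Editorial worked example for the decoding above: Samsung's PNP ID "SAM" is
 * stored as mfg_id[] = { 0x4c, 0x2d }.  (0x4c & 0x7c) >> 2 = 19 -> 'S',
 * ((0x4c & 0x03) << 3) | ((0x2d & 0xe0) >> 5) = 1 -> 'A', and
 * 0x2d & 0x1f = 13 -> 'M', each 5-bit code being an offset from '@'.
 */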
/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
*/
static u32 edid_get_quirks(struct edid *edid)
{
struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (edid_vendor(edid, quirk->vendor) &&
(EDID_PRODUCT_ID(edid) == quirk->product_id))
return quirk->quirks;
}
return 0;
}
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
/**
* edid_fixup_preferred - set preferred modes based on quirk list
* @connector: has mode list to fix up
* @quirks: quirks list
*
* Walk the mode list for @connector, clearing the preferred status
* on existing modes and setting it anew for the right mode ala @quirks.
*/
static void edid_fixup_preferred(struct drm_connector *connector,
u32 quirks)
{
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
if (list_empty(&connector->probed_modes))
return;
if (quirks & EDID_QUIRK_PREFER_LARGE_60)
target_refresh = 60;
if (quirks & EDID_QUIRK_PREFER_LARGE_75)
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
struct drm_display_mode, head);
list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
if (cur_mode == preferred_mode)
continue;
/* Largest mode is preferred */
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
MODE_REFRESH_DIFF(cur_mode, target_refresh) <
MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
preferred_mode = cur_mode;
}
}
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @t: standard timing params
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT.
*
* Punts for now, but should eventually use the FB layer's CVT based mode
* generation code.
*/
struct drm_display_mode *drm_mode_std(struct drm_device *dev,
struct std_timing *t)
{
// struct fb_videomode mode;
// fb_find_mode_cvt(&mode, 0, 0);
/* JJJ: convert to drm_display_mode */
struct drm_display_mode *mode;
int hsize = t->hsize * 8 + 248, vsize;
mode = drm_mode_create(dev);
if (!mode)
return NULL;
if (t->aspect_ratio == 0)
vsize = (hsize * 10) / 16;
else if (t->aspect_ratio == 1)
vsize = (hsize * 3) / 4;
else if (t->aspect_ratio == 2)
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
drm_mode_set_name(mode);
return mode;
}
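/*
 * Editorial worked example for the arithmetic above: a standard timing hsize
 * byte of 129 gives 129 * 8 + 248 = 1280 pixels; with aspect_ratio == 1 (4:3)
 * the height comes out as 1280 * 3 / 4 = 960 lines.  Only the geometry is
 * derived here; full CVT timing generation is still a TODO in this version.
 */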
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
* @edid: EDID block
* @timing: EDID detailed timing info
* @quirks: quirks to apply
*
* An EDID detailed timing block contains enough info for us to create and
* return a new struct drm_display_mode.
*/
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
struct edid *edid,
struct detailed_timing *timing,
u32 quirks)
{
struct drm_display_mode *mode;
struct detailed_pixel_timing *pt = &timing->data.pixel_data;
if (pt->stereo) {
printk(KERN_WARNING "stereo mode not supported\n");
return NULL;
}
if (!pt->separate_sync) {
printk(KERN_WARNING "integrated sync not supported\n");
return NULL;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_DRIVER;
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = 1088;
mode->clock = timing->pixel_clock * 10;
mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
pt->hsync_offset_lo);
mode->hsync_end = mode->hsync_start +
((pt->hsync_pulse_width_hi << 8) |
pt->hsync_pulse_width_lo);
mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
pt->vsync_offset_lo);
mode->vsync_end = mode->vsync_start +
((pt->vsync_pulse_width_hi << 8) |
pt->vsync_pulse_width_lo);
mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
drm_mode_set_name(mode);
if (pt->interlaced)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->hsync_positive = 1;
pt->vsync_positive = 1;
}
mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8);
mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8);
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
mode->width_mm = edid->width_cm * 10;
mode->height_mm = edid->height_cm * 10;
}
return mode;
}
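/*
 * Illustration of the hi/lo byte assembly done above, using hypothetical
 * detailed-timing values (not taken from this diff):
 *
 *   pt->hactive_lo = 0x80, pt->hactive_hi = 0x7
 *       -> mode->hdisplay = (0x7 << 8) | 0x80 = 1920
 *   pt->hblank_lo = 0xa0, pt->hblank_hi = 0x0
 *       -> mode->htotal   = 1920 + 0xa0 = 2080
 *   timing->pixel_clock = 15400
 *       -> mode->clock    = 154000 kHz (154 MHz)
 *
 * The vertical fields are assembled the same way from the vactive/vblank
 * and vsync hi/lo pairs.
 */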
/*
* Detailed mode info for the EDID "established modes" data to use.
*/
static struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
846, 900, 0, 400, 421, 423, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
846, 900, 0, 400, 412, 414, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
928, 1152, 0, 624, 625, 628, 667, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
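/*
 * Reading one entry of the table above (assuming the usual DRM_MODE()
 * argument order: name, type, clock in kHz, then the horizontal
 * display/sync-start/sync-end/total/skew values, the vertical equivalents
 * plus vscan, and finally the flags):
 *
 *   800x600@60Hz: 40 MHz dot clock, hdisplay 800, hsync 840-968,
 *   htotal 1056, vdisplay 600, vsync 601-605, vtotal 628, +hsync +vsync.
 */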
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
/**
 * add_established_modes - get est. modes from EDID and add them
 * @connector: connector to add the modes to
 * @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
* (defined above). Tease them out and add them to the global modes list.
*/
static int add_established_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
unsigned long est_bits = edid->established_timings.t1 |
(edid->established_timings.t2 << 8) |
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
for (i = 0; i <= EDID_EST_TIMINGS; i++)
if (est_bits & (1<<i)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
return modes;
}
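/*
 * Example of the bitmap assembly above, with hypothetical EDID bytes
 * (not taken from this diff):
 *
 *   t1 = 0x20, t2 = 0x08, mfg_rsvd = 0x00
 *       -> est_bits = 0x0820
 *   bit 5  set (from t1) -> edid_est_modes[5]  (640x480@60Hz) is added
 *   bit 11 set (from t2) -> edid_est_modes[11] (1024x768@60Hz) is added
 */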
/**
 * add_standard_modes - get std. modes from EDID and add them
 * @connector: connector to add the modes to
 * @edid: EDID block to scan
*
* Standard modes can be calculated using the CVT standard. Grab them from
* @edid, calculate them, and add them to the list.
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
int i, modes = 0;
for (i = 0; i < EDID_STD_TIMINGS; i++) {
struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
/* If std timings bytes are 1, 1 it's empty */
if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
continue;
newmode = drm_mode_std(dev, &edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
return modes;
}
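/*
 * The "1, 1" check above is the unused-slot marker: a standard timing slot
 * padded with the bytes 0x01 0x01 decodes to hsize == 1 with vfreq == 1 and
 * aspect_ratio == 0, so (aspect_ratio | vfreq) == 1.
 */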
/**
 * add_detailed_info - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
*
* Some of the detailed timing sections may contain mode information. Grab
* it and add it to the list.
*/
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
struct drm_device *dev = connector->dev;
int i, j, modes = 0;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_display_mode *newmode;
/* EDID up to and including 1.2 may put monitor info here */
if (edid->version == 1 && edid->revision < 3)
continue;
/* Detailed mode timing */
if (timing->pixel_clock) {
newmode = drm_mode_detailed(dev, edid, timing, quirks);
if (!newmode)
continue;
/* First detailed mode is preferred */
if (i == 0 && edid->preferred_timing)
newmode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, newmode);
modes++;
continue;
}
/* Other timing or info */
switch (data->type) {
case EDID_DETAIL_MONITOR_SERIAL:
break;
case EDID_DETAIL_MONITOR_STRING:
break;
case EDID_DETAIL_MONITOR_RANGE:
/* Get monitor range data */
break;
case EDID_DETAIL_MONITOR_NAME:
break;
case EDID_DETAIL_MONITOR_CPDATA:
break;
case EDID_DETAIL_STD_MODES:
/* Five modes per detailed section */
			for (j = 0; j < 5; j++) {
struct std_timing *std;
struct drm_display_mode *newmode;
std = &data->data.timings[j];
newmode = drm_mode_std(dev, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
break;
default:
break;
}
}
return modes;
}
#define DDC_ADDR 0x50
unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
{
unsigned char start = 0x0;
unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = EDID_LENGTH,
.buf = buf,
}
};
if (!buf) {
dev_warn(&adapter->dev, "unable to allocate memory for EDID "
"block.\n");
return NULL;
}
if (i2c_transfer(adapter, msgs, 2) == 2)
return buf;
dev_info(&adapter->dev, "unable to read EDID block.\n");
kfree(buf);
return NULL;
}
EXPORT_SYMBOL(drm_do_probe_ddc_edid);
static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
{
struct i2c_algo_bit_data *algo_data = adapter->algo_data;
unsigned char *edid = NULL;
int i, j;
algo_data->setscl(algo_data->data, 1);
for (i = 0; i < 3; i++) {
/* For some old monitors we need the
* following process to initialize/stop DDC
*/
algo_data->setsda(algo_data->data, 1);
msleep(13);
algo_data->setscl(algo_data->data, 1);
for (j = 0; j < 5; j++) {
msleep(10);
if (algo_data->getscl(algo_data->data))
break;
}
if (j == 5)
continue;
algo_data->setsda(algo_data->data, 0);
msleep(15);
algo_data->setscl(algo_data->data, 0);
msleep(15);
algo_data->setsda(algo_data->data, 1);
msleep(15);
/* Do the real work */
edid = drm_do_probe_ddc_edid(adapter);
algo_data->setsda(algo_data->data, 0);
algo_data->setscl(algo_data->data, 0);
msleep(15);
algo_data->setscl(algo_data->data, 1);
for (j = 0; j < 10; j++) {
msleep(10);
if (algo_data->getscl(algo_data->data))
break;
}
algo_data->setsda(algo_data->data, 1);
msleep(15);
algo_data->setscl(algo_data->data, 0);
algo_data->setsda(algo_data->data, 0);
if (edid)
break;
}
/* Release the DDC lines when done or the Apple Cinema HD display
* will switch off
*/
algo_data->setsda(algo_data->data, 1);
algo_data->setscl(algo_data->data, 1);
return edid;
}
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: i2c adapter to use for DDC
*
* Poke the given connector's i2c channel to grab EDID data if possible.
*
* Return edid data or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
edid = (struct edid *)drm_ddc_read(adapter);
if (!edid) {
dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n",
drm_get_connector_name(connector));
return NULL;
}
if (!edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
kfree(edid);
return NULL;
}
connector->display_info.raw_edid = (char *)edid;
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
*
* Add the specified modes to the connector's mode list.
*
* Return number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
int num_modes = 0;
u32 quirks;
if (edid == NULL) {
return 0;
}
if (!edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
}
quirks = edid_get_quirks(edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_detailed_info(connector, edid, quirks);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
connector->display_info.serration_vsync = edid->serration_vsync;
connector->display_info.sync_on_green = edid->sync_on_green;
connector->display_info.composite_sync = edid->composite_sync;
connector->display_info.separate_syncs = edid->separate_syncs;
connector->display_info.blank_to_black = edid->blank_to_black;
connector->display_info.video_level = edid->video_level;
connector->display_info.digital = edid->digital;
connector->display_info.width_mm = edid->width_cm * 10;
connector->display_info.height_mm = edid->height_cm * 10;
connector->display_info.gamma = edid->gamma;
connector->display_info.gtf_supported = edid->default_gtf;
connector->display_info.standard_color = edid->standard_color;
connector->display_info.display_type = edid->display_type;
connector->display_info.active_off_supported = edid->pm_active_off;
connector->display_info.suspend_supported = edid->pm_suspend;
connector->display_info.standby_supported = edid->pm_standby;
connector->display_info.gamma = edid->gamma;
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
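/*
 * A minimal sketch (not part of this diff) of how a driver's get_modes()
 * hook would be expected to chain the two helpers above; the
 * example_connector_ddc() helper and function name are hypothetical.
 */
static int example_get_modes(struct drm_connector *connector)
{
	struct i2c_adapter *ddc = example_connector_ddc(connector); /* hypothetical */
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, ddc);	/* DDC probe via drm_ddc_read() */
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);	/* this simple sketch does not keep the EDID around */
	}
	return count;
}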

178
linux-core/drm_edid.h Normal file
View File

@ -0,0 +1,178 @@
#ifndef __DRM_EDID_H__
#define __DRM_EDID_H__
#include <linux/types.h>
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
#ifdef BIG_ENDIAN
#error "EDID structure is little endian, need big endian versions"
#endif
struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
} __attribute__((packed));
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq:6; /* need to add 60 */
u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
} __attribute__((packed));
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
u8 hactive_lo;
u8 hblank_lo;
u8 hblank_hi:4;
u8 hactive_hi:4;
u8 vactive_lo;
u8 vblank_lo;
u8 vblank_hi:4;
u8 vactive_hi:4;
u8 hsync_offset_lo;
u8 hsync_pulse_width_lo;
u8 vsync_pulse_width_lo:4;
u8 vsync_offset_lo:4;
u8 hsync_pulse_width_hi:2;
u8 hsync_offset_hi:2;
u8 vsync_pulse_width_hi:2;
u8 vsync_offset_hi:2;
u8 width_mm_lo;
u8 height_mm_lo;
u8 height_mm_hi:4;
u8 width_mm_hi:4;
u8 hborder;
u8 vborder;
u8 unknown0:1;
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 separate_sync:2;
u8 stereo:1;
u8 unknown6:1;
u8 interlaced:1;
} __attribute__((packed));
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
u16 m; /* FIXME: byte order */
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed));
struct detailed_data_wpindex {
u8 white_y_lo:2;
u8 white_x_lo:2;
u8 pad:4;
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
} __attribute__((packed));
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
} __attribute__((packed));
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
fb=color point data, fa=standard timing data,
f9=undefined, f8=mfg. reserved */
u8 pad2;
union {
struct detailed_data_string str;
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[5];
} data;
} __attribute__((packed));
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
#define EDID_DETAIL_MONITOR_RANGE 0xfd
#define EDID_DETAIL_MONITOR_STRING 0xfe
#define EDID_DETAIL_MONITOR_SERIAL 0xff
struct detailed_timing {
u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
} data;
} __attribute__((packed));
struct edid {
u8 header[8];
/* Vendor & product info */
u8 mfg_id[2];
u8 prod_code[2];
u32 serial; /* FIXME: byte order */
u8 mfg_week;
u8 mfg_year;
/* EDID version */
u8 version;
u8 revision;
/* Display info: */
/* input definition */
u8 serration_vsync:1;
u8 sync_on_green:1;
u8 composite_sync:1;
u8 separate_syncs:1;
u8 blank_to_black:1;
u8 video_level:2;
u8 digital:1; /* bits below must be zero if set */
u8 width_cm;
u8 height_cm;
u8 gamma;
/* feature support */
u8 default_gtf:1;
u8 preferred_timing:1;
u8 standard_color:1;
u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
u8 pm_active_off:1;
u8 pm_suspend:1;
u8 pm_standby:1;
/* Color characteristics */
u8 red_green_lo;
u8 black_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
u8 green_y;
u8 blue_x;
u8 blue_y;
u8 white_x;
u8 white_y;
/* Est. timings and mfg rsvd timings*/
struct est_timings established_timings;
/* Standard timings 1-8*/
struct std_timing standard_timings[8];
/* Detailing timings 1-4 */
struct detailed_timing detailed_timings[4];
/* Number of 128 byte ext. blocks */
u8 extensions;
/* Checksum */
u8 checksum;
} __attribute__((packed));
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
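/*
 * edid_is_valid() and edid_get_quirks(), called from drm_edid.c above, are
 * not part of this hunk.  A minimal sketch of the header-and-checksum test
 * edid_is_valid() is assumed to perform (names here are illustrative only):
 */
static inline int edid_block_looks_valid(const struct edid *edid)
{
	const u8 magic[8] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};
	const u8 *raw = (const u8 *)edid;
	u8 csum = 0;
	int i;

	for (i = 0; i < 8; i++)		/* fixed EDID header pattern */
		if (edid->header[i] != magic[i])
			return 0;
	for (i = 0; i < EDID_LENGTH; i++)	/* block must sum to 0 mod 256 */
		csum += raw[i];
	return csum == 0;
}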
#endif /* __DRM_EDID_H__ */

432
linux-core/drm_fb.c Normal file
View File

@ -0,0 +1,432 @@
/*
* Copyright © 2007 David Airlie
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* David Airlie
*/
/*
* Modularization
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include "drmP.h"
#include "drm_crtc.h"
struct drmfb_par {
struct drm_device *dev;
struct drm_crtc *crtc;
};
static int drmfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct drmfb_par *par = info->par;
struct drm_framebuffer *fb = par->crtc->fb;
struct drm_crtc *crtc = par->crtc;
if (regno > 255)
return 1;
if (fb->depth == 8) {
return 0;
}
if (regno < 16) {
switch (fb->depth) {
case 15:
fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
break;
case 16:
fb->pseudo_palette[regno] = (red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
case 24:
case 32:
fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
(green & 0xff00) |
((blue & 0xff00) >> 8);
break;
}
}
return 0;
}
static int drmfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct drmfb_par *par = info->par;
struct drm_device *dev = par->dev;
struct drm_framebuffer *fb = par->crtc->fb;
struct drm_display_mode *drm_mode;
struct drm_output *output;
int depth;
if (!var->pixclock)
return -EINVAL;
/* Need to resize the fb object !!! */
if (var->xres > fb->width || var->yres > fb->height) {
DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height);
DRM_ERROR("Need resizing code.\n");
return -EINVAL;
}
switch (var->bits_per_pixel) {
case 16:
depth = (var->green.length == 6) ? 16 : 15;
break;
case 32:
depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
depth = var->bits_per_pixel;
break;
}
switch (depth) {
case 8:
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 15:
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
var->transp.length = 1;
var->transp.offset = 15;
break;
case 16:
var->red.offset = 11;
var->green.offset = 6;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 32:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 8;
var->transp.offset = 24;
break;
default:
return -EINVAL;
}
#if 0
/* Here we walk the output mode list and look for modes. If we haven't
* got it, then bail. Not very nice, so this is disabled.
* In the set_par code, we create our mode based on the incoming
* parameters. Nicer, but may not be desired by some.
*/
list_for_each_entry(output, &dev->mode_config.output_list, head) {
if (output->crtc == par->crtc)
break;
}
list_for_each_entry(drm_mode, &output->modes, head) {
if (drm_mode->hdisplay == var->xres &&
drm_mode->vdisplay == var->yres &&
drm_mode->clock != 0)
break;
}
if (!drm_mode)
return -EINVAL;
#endif
return 0;
}
/* this will let fbcon do the mode init */
static int drmfb_set_par(struct fb_info *info)
{
struct drmfb_par *par = info->par;
struct drm_framebuffer *fb = par->crtc->fb;
struct drm_device *dev = par->dev;
struct drm_display_mode *drm_mode;
struct fb_var_screeninfo *var = &info->var;
struct drm_output *output;
switch (var->bits_per_pixel) {
case 16:
fb->depth = (var->green.length == 6) ? 16 : 15;
break;
case 32:
fb->depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
fb->depth = var->bits_per_pixel;
break;
}
fb->bits_per_pixel = var->bits_per_pixel;
info->fix.line_length = fb->pitch;
info->fix.smem_len = info->fix.line_length * fb->height;
info->fix.visual = (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
info->screen_size = info->fix.smem_len; /* ??? */
/* Should we walk the output's modelist or just create our own ???
* For now, we create and destroy a mode based on the incoming
* parameters. But there's commented out code below which scans
* the output list too.
*/
#if 0
list_for_each_entry(output, &dev->mode_config.output_list, head) {
if (output->crtc == par->crtc)
break;
}
list_for_each_entry(drm_mode, &output->modes, head) {
if (drm_mode->hdisplay == var->xres &&
drm_mode->vdisplay == var->yres &&
drm_mode->clock != 0)
break;
}
#else
drm_mode = drm_mode_create(dev);
drm_mode->hdisplay = var->xres;
drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
drm_mode->vdisplay = var->yres;
drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
drm_mode->clock = PICOS2KHZ(var->pixclock);
drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
drm_mode_set_name(drm_mode);
drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
#endif
if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
return -EINVAL;
/* Have to destroy our created mode if we're not searching the mode
* list for it.
*/
#if 1
drm_mode_destroy(dev, drm_mode);
#endif
return 0;
}
static struct fb_ops drmfb_ops = {
.owner = THIS_MODULE,
// .fb_open = drmfb_open,
// .fb_read = drmfb_read,
// .fb_write = drmfb_write,
// .fb_release = drmfb_release,
// .fb_ioctl = drmfb_ioctl,
.fb_check_var = drmfb_check_var,
.fb_set_par = drmfb_set_par,
.fb_setcolreg = drmfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
{
struct fb_info *info;
struct drm_framebuffer *fb = crtc->fb;
struct drmfb_par *par;
struct device *device = &dev->pdev->dev;
struct drm_display_mode *mode = crtc->desired_mode;
int ret;
info = framebuffer_alloc(sizeof(struct drmfb_par), device);
if (!info)
return -ENOMEM;
fb->fbdev = info;
par = info->par;
par->dev = dev;
par->crtc = crtc;
info->fbops = &drmfb_ops;
strcpy(info->fix.id, "drmfb");
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.line_length = fb->pitch;
info->fix.smem_start = fb->offset + dev->mode_config.fb_base;
info->fix.smem_len = info->fix.line_length * fb->height;
info->flags = FBINFO_DEFAULT;
ret = drm_mem_reg_ioremap(dev, &fb->bo->mem, &fb->virtual_base);
if (ret)
DRM_ERROR("error mapping fb: %d\n", ret);
info->screen_base = fb->virtual_base;
info->screen_size = info->fix.smem_len; /* ??? */
info->pseudo_palette = fb->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
info->var.vmode = FB_VMODE_NONINTERLACED;
info->var.xres = mode->hdisplay;
info->var.right_margin = mode->hsync_start - mode->hdisplay;
info->var.hsync_len = mode->hsync_end - mode->hsync_start;
info->var.left_margin = mode->htotal - mode->hsync_end;
info->var.yres = mode->vdisplay;
info->var.lower_margin = mode->vsync_start - mode->vdisplay;
info->var.vsync_len = mode->vsync_end - mode->vsync_start;
info->var.upper_margin = mode->vtotal - mode->vsync_end;
info->var.pixclock = 10000000 / mode->htotal * 1000 /
mode->vtotal * 100;
/* avoid overflow */
info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
DRM_DEBUG("fb depth is %d\n", fb->depth);
switch(fb->depth) {
case 8:
info->var.red.offset = 0;
info->var.green.offset = 0;
info->var.blue.offset = 0;
info->var.red.length = 8; /* 8bit DAC */
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 15:
info->var.red.offset = 10;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 5;
info->var.transp.offset = 15;
info->var.transp.length = 1;
break;
case 16:
info->var.red.offset = 11;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 6;
info->var.blue.length = 5;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 24:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 32:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 8;
info->var.transp.offset = 24;
info->var.transp.length = 8;
break;
default:
break;
}
	if (register_framebuffer(info) < 0) {
		/* registration failed, so there is nothing to unregister */
		fb->fbdev = NULL;
		framebuffer_release(info);
		return -EINVAL;
	}
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
info->fix.id);
return 0;
}
EXPORT_SYMBOL(drmfb_probe);
int drmfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_framebuffer *fb = crtc->fb;
	struct fb_info *info = fb->fbdev;
if (info) {
drm_mem_reg_iounmap(dev, &fb->bo->mem, fb->virtual_base);
unregister_framebuffer(info);
framebuffer_release(info);
}
return 0;
}
EXPORT_SYMBOL(drmfb_remove);
MODULE_LICENSE("GPL");

View File

@ -134,8 +134,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
if (new_type) {
fence->signaled_types |= new_type;
DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
fence->base.hash.key, fence->signaled_types);
DRM_DEBUG("Fence %p signaled 0x%08x\n",
fence, fence->signaled_types);
if (driver->needed_flush)
fc->pending_flush |= driver->needed_flush(fence);
@ -147,8 +147,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
if (!(fence->type & ~fence->signaled_types)) {
DRM_DEBUG("Fence completely signaled 0x%08lx\n",
fence->base.hash.key);
DRM_DEBUG("Fence completely signaled %p\n",
fence);
list_del_init(&fence->ring);
}
}
@ -196,10 +196,9 @@ void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
drm_fence_unring(dev, &tmp_fence->ring);
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
tmp_fence->base.hash.key);
DRM_DEBUG("Destroyed a fence object %p\n",
tmp_fence);
atomic_dec(&fm->count);
BUG_ON(!list_empty(&tmp_fence->base.list));
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
}
@ -217,7 +216,6 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
if (atomic_read(&tmp_fence->usage) == 0) {
drm_fence_unring(dev, &tmp_fence->ring);
atomic_dec(&fm->count);
BUG_ON(!list_empty(&tmp_fence->base.list));
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
@ -244,15 +242,6 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);
static void drm_fence_object_destroy(struct drm_file *priv,
struct drm_user_object *base)
{
struct drm_fence_object *fence =
drm_user_object_entry(base, struct drm_fence_object, base);
drm_fence_usage_deref_locked(&fence);
}
int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
{
unsigned long flags;
@ -392,7 +381,6 @@ int drm_fence_object_wait(struct drm_fence_object *fence,
if (driver->wait)
return driver->wait(fence, lazy, !ignore_signals, mask);
drm_fence_object_flush(fence, mask);
if (driver->has_irq(dev, fence->fence_class, mask)) {
if (!ignore_signals)
@ -420,8 +408,6 @@ int drm_fence_object_wait(struct drm_fence_object *fence,
}
EXPORT_SYMBOL(drm_fence_object_wait);
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
uint32_t fence_class, uint32_t type)
{
@ -477,7 +463,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
* Avoid hitting BUG() for kernel-only fence objects.
*/
INIT_LIST_HEAD(&fence->base.list);
fence->fence_class = fence_class;
fence->type = type;
fence->signaled_types = 0;
@ -493,26 +478,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
return ret;
}
int drm_fence_add_user_object(struct drm_file *priv,
struct drm_fence_object *fence, int shareable)
{
struct drm_device *dev = priv->minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(priv, &fence->base, shareable);
if (ret)
goto out;
atomic_inc(&fence->usage);
fence->base.type = drm_fence_type;
fence->base.remove = &drm_fence_object_destroy;
DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
uint32_t type, unsigned flags,
struct drm_fence_object **c_fence)
@ -569,261 +534,7 @@ void drm_fence_manager_init(struct drm_device *dev)
write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_fill_arg(struct drm_fence_object *fence,
struct drm_fence_arg *arg)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
unsigned long irq_flags;
read_lock_irqsave(&fm->lock, irq_flags);
arg->handle = fence->base.hash.key;
arg->fence_class = fence->fence_class;
arg->type = fence->type;
arg->signaled = fence->signaled_types;
arg->error = fence->error;
arg->sequence = fence->sequence;
read_unlock_irqrestore(&fm->lock, irq_flags);
}
EXPORT_SYMBOL(drm_fence_fill_arg);
void drm_fence_manager_takedown(struct drm_device *dev)
{
}
struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
uint32_t handle)
{
struct drm_device *dev = priv->minor->dev;
struct drm_user_object *uo;
struct drm_fence_object *fence;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, handle);
if (!uo || (uo->type != drm_fence_type)) {
mutex_unlock(&dev->struct_mutex);
return NULL;
}
fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
mutex_unlock(&dev->struct_mutex);
return fence;
}
int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
if (arg->flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = drm_fence_object_create(dev, arg->fence_class,
arg->type, arg->flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(file_priv, fence,
arg->flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret) {
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
/*
* usage > 0. No need to lock dev->struct_mutex;
*/
arg->handle = fence->base.hash.key;
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
struct drm_user_object *uo;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
if (ret)
return ret;
fence = drm_lookup_fence_object(file_priv, arg->handle);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
}
int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_flush(fence, arg->type);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_wait(fence,
arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
0, arg->type);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, file_priv);
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
arg->type);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized\n");
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
NULL, &fence);
if (ret)
return ret;
if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
ret = drm_fence_add_user_object(file_priv, fence,
arg->flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret)
return ret;
}
arg->handle = fence->base.hash.key;
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}

View File

@ -43,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(struct drm_device * dev)
{
drm_local_map_t *map;
int i;
int ret;
int sareapage;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
@ -54,20 +52,13 @@ static int drm_setup(struct drm_device * dev)
return ret;
}
dev->magicfree.next = NULL;
/* prebuild the SAREA */
sareapage = max(SAREA_MAX, PAGE_SIZE);
i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
i = drm_dma_setup(dev);
if (i < 0)
return i;
@ -76,11 +67,8 @@ static int drm_setup(struct drm_device * dev)
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
@ -234,7 +222,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int minor_id = iminor(inode);
struct drm_file *priv;
int ret;
int i, j;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
@ -259,20 +246,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->refd_objects);
for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
ret = drm_ht_create(&priv->refd_object_hash[i],
DRM_FILE_HASH_ORDER);
if (ret)
break;
}
if (ret) {
for (j = 0; j < i; ++j)
drm_ht_remove(&priv->refd_object_hash[j]);
goto out_free;
}
INIT_LIST_HEAD(&priv->fbs);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
@ -283,10 +257,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
mutex_lock(&dev->struct_mutex);
if (list_empty(&dev->filelist))
priv->master = 1;
/* if there is no current master make this fd it */
mutex_lock(&dev->struct_mutex);
if (!priv->minor->master) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
ret = -ENOMEM;
goto out_free;
}
priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
priv->authenticated = 1;
mutex_unlock(&dev->struct_mutex);
if (dev->driver->master_create) {
ret = dev->driver->master_create(dev, priv->master);
if (ret) {
mutex_lock(&dev->struct_mutex);
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
mutex_unlock(&dev->struct_mutex);
goto out_free;
}
}
} else {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
@ -332,33 +338,6 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
static void drm_object_release(struct file *filp)
{
struct drm_file *priv = filp->private_data;
struct list_head *head;
struct drm_ref_object *ref_object;
int i;
/*
* Free leftover ref objects created by me. Note that we cannot use
* list_for_each() here, as the struct_mutex may be temporarily
* released by the remove_() functions, and thus the lists may be
* altered.
* Also, a drm_remove_ref_object() will not remove it
* from the list unless its refcount is 1.
*/
head = &priv->refd_objects;
while (head->next != head) {
ref_object = list_entry(head->next, struct drm_ref_object, list);
drm_remove_ref_object(priv, ref_object);
head = &priv->refd_objects;
}
for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
drm_ht_remove(&priv->refd_object_hash[i]);
}
/**
* Release file.
*
@ -392,59 +371,63 @@ int drm_release(struct inode *inode, struct file *filp)
current->pid, (long)old_encode_dev(file_priv->minor->device),
dev->open_count);
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
if (drm_i_have_hw_lock(dev, file_priv)) {
dev->driver->reclaim_buffers_locked(dev, file_priv);
} else {
unsigned long _end=jiffies + 3*DRM_HZ;
int locked = 0;
drm_idlelock_take(&dev->lock);
/*
* Wait for a while.
*/
do{
spin_lock_bh(&dev->lock.spinlock);
locked = dev->lock.idle_has_lock;
spin_unlock_bh(&dev->lock.spinlock);
if (locked)
break;
schedule();
} while (!time_after_eq(jiffies, _end));
if (!locked) {
DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
"\tdriver to use reclaim_buffers_idlelocked() instead.\n"
"\tI will go on reclaiming the buffers anyway.\n");
/* if the master has gone away we can't do anything with the lock */
if (file_priv->minor->master) {
if (dev->driver->reclaim_buffers_locked && file_priv->master->lock.hw_lock) {
if (drm_i_have_hw_lock(dev, file_priv)) {
dev->driver->reclaim_buffers_locked(dev, file_priv);
} else {
unsigned long _end=jiffies + 3*DRM_HZ;
int locked = 0;
drm_idlelock_take(&file_priv->master->lock);
/*
* Wait for a while.
*/
do{
spin_lock_bh(&file_priv->master->lock.spinlock);
locked = file_priv->master->lock.idle_has_lock;
spin_unlock_bh(&file_priv->master->lock.spinlock);
if (locked)
break;
schedule();
} while (!time_after_eq(jiffies, _end));
if (!locked) {
DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
"\tdriver to use reclaim_buffers_idlelocked() instead.\n"
"\tI will go on reclaiming the buffers anyway.\n");
}
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_idlelock_release(&file_priv->master->lock);
}
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_idlelock_release(&dev->lock);
}
}
if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
drm_idlelock_take(&dev->lock);
dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
drm_idlelock_release(&dev->lock);
}
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
if (dev->driver->reclaim_buffers_idlelocked && file_priv->master->lock.hw_lock) {
drm_idlelock_take(&file_priv->master->lock);
dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
drm_idlelock_release(&file_priv->master->lock);
}
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, file_priv);
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, file_priv);
}
}
if (dev->driver->driver_features & DRIVER_GEM)
@ -474,14 +457,29 @@ int drm_release(struct inode *inode, struct file *filp)
}
mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
drm_object_release(filp);
if (file_priv->remove_auth_on_close == 1) {
struct drm_file *temp;
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_fb_release(filp);
list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
mutex_lock(&dev->struct_mutex);
if (file_priv->is_master) {
struct drm_file *temp;
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
temp->authenticated = 0;
}
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held by the minor */
drm_master_put(&file_priv->minor->master);
}
}
/* drop the reference held by the file priv */
drm_master_put(&file_priv->master);
file_priv->is_master = 0;
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
@ -496,9 +494,9 @@ int drm_release(struct inode *inode, struct file *filp)
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count), dev->blocked);
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
atomic_read(&dev->ioctl_count));
spin_unlock(&dev->count_lock);
unlock_kernel();
return -EBUSY;

View File

@ -105,7 +105,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
kfree(obj);
return NULL;
}
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
@ -264,8 +263,9 @@ again:
spin_lock(&dev->object_name_lock);
if (obj->name) {
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
return -EEXIST;
return 0;
}
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);

View File

@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
if (u->unique_len >= dev->unique_len) {
if (copy_to_user(u->unique, dev->unique, dev->unique_len))
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len))
return -EFAULT;
}
u->unique_len = dev->unique_len;
u->unique_len = master->unique_len;
return 0;
}
@ -81,36 +82,37 @@ int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
int domain, bus, slot, func, ret;
if (dev->unique_len || dev->unique)
if (master->unique_len || master->unique)
return -EBUSY;
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
dev->unique_len = u->unique_len;
dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
if (!dev->unique)
master->unique_len = u->unique_len;
master->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
if (!master->unique)
return -ENOMEM;
if (copy_from_user(dev->unique, u->unique, dev->unique_len))
if (copy_from_user(master->unique, u->unique, master->unique_len))
return -EFAULT;
dev->unique[dev->unique_len] = '\0';
master->unique[master->unique_len] = '\0';
dev->devname =
drm_alloc(strlen(dev->driver->pci_driver.name) +
strlen(dev->unique) + 2, DRM_MEM_DRIVER);
strlen(master->unique) + 2, DRM_MEM_DRIVER);
if (!dev->devname)
return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
dev->unique);
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3)
return -EINVAL;
domain = bus >> 8;
@ -125,33 +127,35 @@ int drm_setunique(struct drm_device *dev, void *data,
return 0;
}
static int drm_set_busid(struct drm_device * dev)
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
int len;
if (dev->unique != NULL)
if (master->unique != NULL)
return -EBUSY;
dev->unique_len = 40;
dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
if (dev->unique == NULL)
master->unique_len = 40;
master->unique = drm_alloc(master->unique_len + 1, DRM_MEM_DRIVER);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len > dev->unique_len)
if (len > master->unique_len)
DRM_ERROR("buffer overflow");
dev->devname =
drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len +
2, DRM_MEM_DRIVER);
if (dev->devname == NULL)
return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
dev->unique);
master->unique);
return 0;
}
@ -275,7 +279,7 @@ int drm_getstats(struct drm_device *dev, void *data,
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
(dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
(file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
else
stats->data[i].value = atomic_read(&dev->counts[i]);
stats->data[i].type = dev->types[i];
@ -317,7 +321,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
/*
* Version 1.1 includes tying of DRM to specific device
*/
drm_set_busid(dev);
drm_set_busid(dev, file_priv);
}
}

View File

@ -96,30 +96,37 @@ static void vblank_disable_fn(unsigned long arg)
static void drm_vblank_cleanup(struct drm_device *dev)
{
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
del_timer(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
if (dev->vbl_queue)
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
if (dev->vbl_sigs)
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
if (dev->_vblank_count)
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
dev->num_crtcs, DRM_MEM_DRIVER);
dev->num_crtcs = 0;
if (dev->vblank_refcount)
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
dev->num_crtcs, DRM_MEM_DRIVER);
if (dev->vblank_enabled)
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
dev->num_crtcs, DRM_MEM_DRIVER);
if (dev->last_vblank)
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
DRM_MEM_DRIVER);
if (dev->vblank_inmodeset)
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
dev->num_crtcs, DRM_MEM_DRIVER);
}
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
@ -185,6 +192,30 @@ err:
}
EXPORT_SYMBOL(drm_vblank_init);
static void drm_hotplug_cleanup(struct drm_device *dev)
{
if (dev->hotplug_sigs)
drm_free(dev->hotplug_sigs, sizeof(*dev->hotplug_sigs),
DRM_MEM_DRIVER);
}
EXPORT_SYMBOL(drm_hotplug_cleanup);
int drm_hotplug_init(struct drm_device *dev)
{
spin_lock_init(&dev->hotplug_lock);
atomic_set(&dev->hotplug_signal_pending, 0);
dev->hotplug_sigs = drm_alloc(sizeof(struct list_head), DRM_MEM_DRIVER);
if (!dev->hotplug_sigs)
return -ENOMEM;
INIT_LIST_HEAD(dev->hotplug_sigs);
init_waitqueue_head(&dev->hotplug_queue);
return 0;
}
EXPORT_SYMBOL(drm_hotplug_init);
/**
* Install IRQ handler.
*
@ -215,7 +246,7 @@ int drm_irq_install(struct drm_device * dev)
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
return 0;
}
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
@ -284,6 +315,8 @@ int drm_irq_uninstall(struct drm_device * dev)
drm_vblank_cleanup(dev);
drm_hotplug_cleanup(dev);
dev->locked_tasklet_func = NULL;
return 0;
@ -313,6 +346,8 @@ int drm_control(struct drm_device *dev, void *data,
case DRM_INST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->pdev->irq)
return -EINVAL;
@ -320,6 +355,8 @@ int drm_control(struct drm_device *dev, void *data,
case DRM_UNINST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
return drm_irq_uninstall(dev);
default:
return -EINVAL;
@ -560,7 +597,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
struct drm_vbl_sig *vbl_sig;
struct drm_vbl_sig *vbl_sig, *tmp;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@ -568,7 +605,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry(vbl_sig, vbl_sigs, head) {
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if (vbl_sig->sequence == vblwait->request.sequence
&& vbl_sig->info.si_signo ==
vblwait->request.signal
@ -692,6 +729,53 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_handle_vblank);
/**
* Send the HOTPLUG signals.
*
* \param dev DRM device.
*
* Sends a signal for each task in drm_device::hotplug_sigs and empties the list.
*/
static void drm_hotplug_send_signals(struct drm_device * dev)
{
struct drm_hotplug_sig *hotplug_sig, *tmp;
struct list_head *hotplug_sigs;
unsigned long flags;
spin_lock_irqsave(&dev->hotplug_lock, flags);
hotplug_sigs = dev->hotplug_sigs;
list_for_each_entry_safe(hotplug_sig, tmp, hotplug_sigs, head) {
hotplug_sig->info.si_code = hotplug_sig->counter;
send_sig_info(hotplug_sig->info.si_signo,
&hotplug_sig->info, hotplug_sig->task);
list_del(&hotplug_sig->head);
drm_free(hotplug_sig, sizeof(*hotplug_sig),
DRM_MEM_DRIVER);
atomic_dec(&dev->hotplug_signal_pending);
}
spin_unlock_irqrestore(&dev->hotplug_lock, flags);
}
/**
* drm_handle_hotplug - handle a hotplug event
* @dev: DRM device
* @crtc: where this event occurred
*
* Drivers should call this routine in their hotplug interrupt handlers.
*/
void drm_handle_hotplug(struct drm_device *dev)
{
DRM_WAKEUP(&dev->hotplug_queue);
drm_hotplug_send_signals(dev);
}
EXPORT_SYMBOL(drm_handle_hotplug);
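/*
 * Sketch of the call site described in the comment above; the interrupt
 * status read and the HOTPLUG bit name are hypothetical, not taken from
 * any driver in this diff.
 */
static irqreturn_t example_driver_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	u32 status = EXAMPLE_READ_IRQ_STATUS(dev);	/* hypothetical register read */

	if (status & EXAMPLE_IRQ_HOTPLUG_BIT)		/* hypothetical status bit */
		drm_handle_hotplug(dev);

	return IRQ_HANDLED;
}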
/**
* Tasklet wrapper function.
*
@ -712,12 +796,12 @@ static void drm_locked_tasklet_func(unsigned long data)
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (!tasklet_func ||
!drm_lock_take(&dev->lock,
!drm_lock_take(&dev->primary->master->lock,
DRM_KERNEL_CONTEXT)) {
return;
}
dev->lock.lock_time = jiffies;
dev->primary->master->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
@ -728,7 +812,7 @@ static void drm_locked_tasklet_func(unsigned long data)
if (tasklet_func != NULL)
tasklet_func(dev);
drm_lock_free(&dev->lock,
drm_lock_free(&dev->primary->master->lock,
DRM_KERNEL_CONTEXT);
}

View File

@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
int ret = 0;
++file_priv->lock_count;
@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, current->pid,
dev->lock.hw_lock->lock, lock->flags);
master->lock.hw_lock->lock, lock->flags);
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
if (lock->context < 0)
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
spin_lock_bh(&dev->lock.spinlock);
dev->lock.user_waiters++;
spin_unlock_bh(&dev->lock.spinlock);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
spin_unlock_bh(&master->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
if (!master->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
break;
}
}
spin_lock_bh(&dev->lock.spinlock);
dev->lock.user_waiters--;
spin_unlock_bh(&dev->lock.spinlock);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters--;
spin_unlock_bh(&master->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
remove_wait_queue(&master->lock.lock_queue, &entry);
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
if (!file_priv->master) {
if (!file_priv->is_master) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock->context;
dev->sigdata.lock = dev->lock.hw_lock;
dev->sigdata.lock = master->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
unsigned long irqflags;
void (*tasklet_func)(struct drm_device *);
@ -178,7 +181,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
if (drm_lock_free(&dev->lock,lock->context)) {
if (drm_lock_free(&master->lock,lock->context)) {
/* FIXME: Should really bail out here. */
}
}
@ -216,16 +219,22 @@ int drm_lock_take(struct drm_lock_data *lock_data,
} while (prev != old);
spin_unlock_bh(&lock_data->spinlock);
/* Warn on recursive locking of user contexts. */
if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
return 0;
}
return !_DRM_LOCK_IS_HELD(old);
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
/**
@ -380,10 +389,10 @@ EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
return (file_priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
dev->lock.file_priv == file_priv);
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);


@ -188,6 +188,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
}
return pt;
}
EXPORT_SYMBOL(drm_realloc);
/**
* Allocate pages.


@ -294,5 +294,4 @@ void drm_mm_takedown(struct drm_mm * mm)
list_del(&entry->ml_entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
EXPORT_SYMBOL(drm_mm_takedown);

1
linux-core/drm_mode.h Symbolic link

@ -0,0 +1 @@
../shared-core/drm_mode.h

570
linux-core/drm_modes.c Normal file

@ -0,0 +1,570 @@
/*
* Copyright © 1997-2003 by The XFree86 Project, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the copyright holder(s)
* and author(s) shall not be used in advertising or otherwise to promote
* the sale, use or other dealings in this Software without prior written
* authorization from the copyright holder(s) and author(s).
*/
/*
* Copyright © 2007 Dave Airlie
*/
#include <linux/list.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
/**
* drm_mode_debug_printmodeline - debug print a mode
* @dev: DRM device
* @mode: mode to print
*
* LOCKING:
* None.
*
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
{
DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
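/*
* Illustrative note (not part of the original patch): for the standard
* VESA 1024x768@60 timing (65 MHz dot clock, negative syncs) the DRM_DEBUG
* above prints something like
*
*   Modeline 17:"1024x768" 60 65000 1024 1048 1184 1344 768 771 777 806 0x40 0xa
*
* where 0x40 is DRM_MODE_TYPE_DRIVER, 0xa is
* DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC (both from drm_mode.h), and
* the object id (17 here) depends on allocation order.
*/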
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
* LOCKING:
* None.
*
* Set the name of @mode to a standard format.
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
mode->vdisplay);
}
EXPORT_SYMBOL(drm_mode_set_name);
/**
* drm_mode_list_concat - move modes from one list to another
* @head: source list
* @new: dst list
*
* LOCKING:
* Caller must ensure both lists are locked.
*
* Move all the modes from @head to @new.
*/
void drm_mode_list_concat(struct list_head *head, struct list_head *new)
{
struct list_head *entry, *tmp;
list_for_each_safe(entry, tmp, head) {
list_move_tail(entry, new);
}
}
EXPORT_SYMBOL(drm_mode_list_concat);
/**
* drm_mode_width - get the width of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's width (hdisplay) value.
*
* FIXME: is this needed?
*
* RETURNS:
* @mode->hdisplay
*/
int drm_mode_width(struct drm_display_mode *mode)
{
return mode->hdisplay;
}
EXPORT_SYMBOL(drm_mode_width);
/**
* drm_mode_height - get the height of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's height (vdisplay) value.
*
* FIXME: is this needed?
*
* RETURNS:
* @mode->vdisplay
*/
int drm_mode_height(struct drm_display_mode *mode)
{
return mode->vdisplay;
}
EXPORT_SYMBOL(drm_mode_height);
/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's vrefresh rate or calculate it if necessary.
*
* FIXME: why is this needed? shouldn't vrefresh be set already?
*
* RETURNS:
* Vertical refresh rate of @mode x 1000. For precision reasons.
*/
int drm_mode_vrefresh(struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
if (mode->vrefresh > 0)
refresh = mode->vrefresh;
else if (mode->htotal > 0 && mode->vtotal > 0) {
/* work out vrefresh; the value will be x1000 */
calc_val = (mode->clock * 1000);
calc_val /= mode->htotal;
calc_val *= 1000;
calc_val /= mode->vtotal;
refresh = calc_val;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
refresh *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
refresh /= 2;
if (mode->vscan > 1)
refresh /= mode->vscan;
}
return refresh;
}
EXPORT_SYMBOL(drm_mode_vrefresh);
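/*
* Worked example (illustrative, not part of the original patch): for the
* VESA 1024x768@60 mode with clock = 65000 kHz, htotal = 1344 and
* vtotal = 806, the integer math above gives
*
*   65000 * 1000 / 1344 = 48363
*   48363 * 1000 / 806  = 60003
*
* i.e. 60.003 Hz, returned as 60003 because the result is scaled by 1000.
*/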
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* @p: mode
* @adjust_flags: unused? (FIXME)
*
* LOCKING:
* None.
*
* Setup the CRTC modesetting parameters for @p, adjusting if necessary.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
return;
p->crtc_hdisplay = p->hdisplay;
p->crtc_hsync_start = p->hsync_start;
p->crtc_hsync_end = p->hsync_end;
p->crtc_htotal = p->htotal;
p->crtc_hskew = p->hskew;
p->crtc_vdisplay = p->vdisplay;
p->crtc_vsync_start = p->vsync_start;
p->crtc_vsync_end = p->vsync_end;
p->crtc_vtotal = p->vtotal;
if (p->flags & DRM_MODE_FLAG_INTERLACE) {
if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
p->crtc_vdisplay /= 2;
p->crtc_vsync_start /= 2;
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
p->crtc_vtotal |= 1;
}
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
}
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
}
p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
p->crtc_hadjusted = false;
p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
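/*
* Illustrative example (not part of the original patch): for a CEA-style
* 1920x1080 interlaced mode (vdisplay 1080, vsync_start 1084,
* vsync_end 1094, vtotal 1125), calling this with CRTC_INTERLACE_HALVE_V
* set yields per-field values of crtc_vdisplay = 540, crtc_vsync_start = 542,
* crtc_vsync_end = 547 and crtc_vtotal = 563 (562 after halving, then
* forced odd).
*/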
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @m: mode to duplicate
*
* LOCKING:
* None.
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
new_id = nmode->base.id;
*nmode = *mode;
nmode->base.id = new_id;
INIT_LIST_HEAD(&nmode->head);
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
/**
* drm_mode_equal - test modes for equality
* @mode1: first mode
* @mode2: second mode
*
* LOCKING:
* None.
*
* Check to see if @mode1 and @mode2 are equivalent.
*
* RETURNS:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
{
/* do the clock check in PICOS so fb modes get matched
* the same way */
if (mode1->clock && mode2->clock) {
if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
return false;
} else if (mode1->clock != mode2->clock)
return false;
if (mode1->hdisplay == mode2->hdisplay &&
mode1->hsync_start == mode2->hsync_start &&
mode1->hsync_end == mode2->hsync_end &&
mode1->htotal == mode2->htotal &&
mode1->hskew == mode2->hskew &&
mode1->vdisplay == mode2->vdisplay &&
mode1->vsync_start == mode2->vsync_start &&
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
mode1->flags == mode2->flags)
return true;
return false;
}
EXPORT_SYMBOL(drm_mode_equal);
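/*
* Illustrative note (not part of the original patch): KHZ2PICOS(), defined
* in <linux/fb.h> as 1000000000UL / clock_in_khz, converts the clock to a
* pixel period in picoseconds, so two modes whose clocks differ by less
* than that granularity still compare equal here. For instance 65000 kHz
* and 65001 kHz both map to a 15384 ps period, which is what lets
* fbdev-derived modes match their DRM counterparts despite small rounding
* differences in the clock.
*/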
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
* @dev: DRM device
* @mode_list: list of modes to check
* @maxX: maximum width
* @maxY: maximum height
* @maxPitch: max pitch
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* The DRM device (@dev) has size and pitch limits. Here we validate the
* modes we probed for @dev against those limits and set their status as
* necessary.
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, mode_list, head) {
if (maxPitch > 0 && mode->hdisplay > maxPitch)
mode->status = MODE_BAD_WIDTH;
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
if (maxY > 0 && mode->vdisplay > maxY)
mode->status = MODE_VIRTUAL_Y;
}
}
EXPORT_SYMBOL(drm_mode_validate_size);
/**
* drm_mode_validate_clocks - validate modes against clock limits
* @dev: DRM device
* @mode_list: list of modes to check
* @min: minimum clock rate array
* @max: maximum clock rate array
* @n_ranges: number of clock ranges (size of arrays)
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Some code may need to check a mode list against the clock limits of the
* device in question. This function walks the mode list, testing to make
* sure each mode falls within a given range (defined by @min and @max
* arrays) and sets @mode->status as needed.
*/
void drm_mode_validate_clocks(struct drm_device *dev,
struct list_head *mode_list,
int *min, int *max, int n_ranges)
{
struct drm_display_mode *mode;
int i;
list_for_each_entry(mode, mode_list, head) {
bool good = false;
for (i = 0; i < n_ranges; i++) {
if (mode->clock >= min[i] && mode->clock <= max[i]) {
good = true;
break;
}
}
if (!good)
mode->status = MODE_CLOCK_RANGE;
}
}
EXPORT_SYMBOL(drm_mode_validate_clocks);
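/*
* Illustrative use (not part of the original patch): a driver whose only
* pixel clock range is 20-400 MHz (clocks in kHz here) could prune its
* probed modes with something like
*
*   int min_clock[] = { 20000 };
*   int max_clock[] = { 400000 };
*
*   drm_mode_validate_clocks(dev, &connector->modes,
*                            min_clock, max_clock, 1);
*
* followed by drm_mode_prune_invalid() to drop anything that was marked
* MODE_CLOCK_RANGE.
*/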
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Once mode list generation is complete, a caller can use this routine to
* remove invalid modes from a mode list. If any of the modes have a
* status other than %MODE_OK, they are removed from @mode_list and freed.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
{
struct drm_display_mode *mode, *t;
list_for_each_entry_safe(mode, t, mode_list, head) {
if (mode->status != MODE_OK) {
list_del(&mode->head);
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
}
drm_mode_destroy(dev, mode);
}
}
}
EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
* LOCKING:
* None.
*
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
* RETURNS:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
int diff;
diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
if (diff)
return diff;
diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
if (diff)
return diff;
diff = b->clock - a->clock;
return diff;
}
/* FIXME: what, we don't have a list sort function? */
/* list sort from Mark J Roberts (mjr@znex.org) */
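/*
* Descriptive note (added for clarity, not in the original patch): this is
* a bottom-up merge sort over the circular, doubly-linked list at @head,
* ordered by @cmp. The head node is temporarily unlinked while sublists of
* size 1, 2, 4, ... are merged, then relinked at the end. Ties keep the
* first element (cmp(p, q) <= 0 takes p), so the sort is stable.
*/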
void list_sort(struct list_head *head, int (*cmp)(struct list_head *a, struct list_head *b))
{
struct list_head *p, *q, *e, *list, *tail, *oldhead;
int insize, nmerges, psize, qsize, i;
list = head->next;
list_del(head);
insize = 1;
for (;;) {
p = oldhead = list;
list = tail = NULL;
nmerges = 0;
while (p) {
nmerges++;
q = p;
psize = 0;
for (i = 0; i < insize; i++) {
psize++;
q = q->next == oldhead ? NULL : q->next;
if (!q)
break;
}
qsize = insize;
while (psize > 0 || (qsize > 0 && q)) {
if (!psize) {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
} else if (!qsize || !q) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else if (cmp(p, q) <= 0) {
e = p;
p = p->next;
psize--;
if (p == oldhead)
p = NULL;
} else {
e = q;
q = q->next;
qsize--;
if (q == oldhead)
q = NULL;
}
if (tail)
tail->next = e;
else
list = e;
e->prev = tail;
tail = e;
}
p = q;
}
tail->next = list;
list->prev = tail;
if (nmerges <= 1)
break;
insize *= 2;
}
head->next = list;
head->prev = list->prev;
list->prev->next = head;
list->prev = head;
}
/**
* drm_mode_sort - sort mode list
* @mode_list: list to sort
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Sort @mode_list by favorability, putting good modes first.
*/
void drm_mode_sort(struct list_head *mode_list)
{
list_sort(mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares each probed mode against the current
* list and only adds modes that are not already present. Any mode left
* unverified after this point will be removed by the prune-invalid pass.
*/
void drm_mode_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
int found_it;
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
/* go through current modes checking for the new probed mode */
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_equal(pmode, mode)) {
found_it = 1;
/* if equal delete the probed mode */
mode->status = pmode->status;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
}
}
if (!found_it) {
list_move_tail(&pmode->head, &connector->modes);
}
}
}
EXPORT_SYMBOL(drm_mode_connector_list_update);
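/*
* Illustrative flow (not part of the original patch): a typical output
* probe uses the helpers in this file roughly as
*
*   connector->funcs fill connector->probed_modes (e.g. from EDID);
*   drm_mode_connector_list_update(connector);
*   drm_mode_validate_size(dev, &connector->modes, maxX, maxY, 0);
*   drm_mode_prune_invalid(dev, &connector->modes, true);
*   drm_mode_sort(&connector->modes);
*
* leaving connector->modes sorted with the preferred/largest modes first.
*/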


@ -1,294 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable)
{
struct drm_device *dev = priv->minor->dev;
int ret;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
/* The refcount will be bumped to 1 when we add the ref object below. */
atomic_set(&item->refcount, 0);
item->shareable = shareable;
item->owner = priv;
ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
(unsigned long)item, 31, 0, 0);
if (ret)
return ret;
ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
if (ret)
ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
return ret;
}
EXPORT_SYMBOL(drm_add_user_object);
struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
{
struct drm_device *dev = priv->minor->dev;
struct drm_hash_item *hash;
int ret;
struct drm_user_object *item;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret)
return NULL;
item = drm_hash_entry(hash, struct drm_user_object, hash);
if (priv != item->owner) {
struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
if (ret) {
DRM_ERROR("Object not registered for usage\n");
return NULL;
}
}
return item;
}
EXPORT_SYMBOL(drm_lookup_user_object);
static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
{
struct drm_device *dev = priv->minor->dev;
int ret;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
BUG_ON(ret);
item->remove(priv, item);
}
}
static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
enum drm_ref_type action)
{
int ret = 0;
switch (action) {
case _DRM_REF_USE:
atomic_inc(&ro->refcount);
break;
default:
if (!ro->ref_struct_locked) {
break;
} else {
ro->ref_struct_locked(priv, ro, action);
}
}
return ret;
}
int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
int ret = 0;
struct drm_ref_object *item;
struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
if (!referenced_object->shareable && priv != referenced_object->owner) {
DRM_ERROR("Not allowed to reference this object\n");
return -EINVAL;
}
/*
* If this is not a usage reference, Check that usage has been registered
* first. Otherwise strange things may happen on destruction.
*/
if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
item =
drm_lookup_ref_object(priv, referenced_object,
_DRM_REF_USE);
if (!item) {
DRM_ERROR
("Object not registered for usage by this client\n");
return -EINVAL;
}
}
if (NULL !=
(item =
drm_lookup_ref_object(priv, referenced_object, ref_action))) {
atomic_inc(&item->refcount);
return drm_object_ref_action(priv, referenced_object,
ref_action);
}
item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
if (item == NULL) {
DRM_ERROR("Could not allocate reference object\n");
return -ENOMEM;
}
atomic_set(&item->refcount, 1);
item->hash.key = (unsigned long)referenced_object;
ret = drm_ht_insert_item(ht, &item->hash);
item->unref_action = ref_action;
if (ret)
goto out;
list_add(&item->list, &priv->refd_objects);
ret = drm_object_ref_action(priv, referenced_object, ref_action);
out:
return ret;
}
struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
struct drm_hash_item *hash;
int ret;
DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
(unsigned long)referenced_object, &hash);
if (ret)
return NULL;
return drm_hash_entry(hash, struct drm_ref_object, hash);
}
EXPORT_SYMBOL(drm_lookup_ref_object);
static void drm_remove_other_references(struct drm_file *priv,
struct drm_user_object *ro)
{
int i;
struct drm_open_hash *ht;
struct drm_hash_item *hash;
struct drm_ref_object *item;
for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
ht = &priv->refd_object_hash[i];
while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
item = drm_hash_entry(hash, struct drm_ref_object, hash);
drm_remove_ref_object(priv, item);
}
}
}
void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
{
int ret;
struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
enum drm_ref_type unref_action;
DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
unref_action = item->unref_action;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(ht, &item->hash);
BUG_ON(ret);
list_del_init(&item->list);
if (unref_action == _DRM_REF_USE)
drm_remove_other_references(priv, user_object);
drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
}
switch (unref_action) {
case _DRM_REF_USE:
drm_deref_user_object(priv, user_object);
break;
default:
BUG_ON(!user_object->unref);
user_object->unref(priv, user_object, unref_action);
break;
}
}
EXPORT_SYMBOL(drm_remove_ref_object);
int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type, struct drm_user_object **object)
{
struct drm_device *dev = priv->minor->dev;
struct drm_user_object *uo;
struct drm_hash_item *hash;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
if (ret) {
DRM_ERROR("Could not find user object to reference.\n");
goto out_err;
}
uo = drm_hash_entry(hash, struct drm_user_object, hash);
if (uo->type != type) {
ret = -EINVAL;
goto out_err;
}
ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
if (ret)
goto out_err;
mutex_unlock(&dev->struct_mutex);
*object = uo;
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type)
{
struct drm_device *dev = priv->minor->dev;
struct drm_user_object *uo;
struct drm_ref_object *ro;
int ret;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, user_token);
if (!uo || (uo->type != type)) {
ret = -EINVAL;
goto out_err;
}
ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
if (!ro) {
ret = -EINVAL;
goto out_err;
}
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}


@ -34,110 +34,212 @@
struct drm_device;
struct drm_bo_mem_reg;
/***************************************************
* User space objects. (drm_object.c)
*/
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
enum drm_object_type {
drm_fence_type,
drm_buffer_type,
drm_lock_type,
/*
* Add other user space object types here.
*/
drm_driver_type0 = 256,
drm_driver_type1,
drm_driver_type2,
drm_driver_type3,
drm_driver_type4
};
/*
* A user object is a structure that helps the drm give out user handles
* to kernel internal objects and to keep track of these objects so that
* they can be destroyed, for example when the user space process exits.
* Designed to be accessible using a user space 32-bit handle.
*/
struct drm_user_object {
struct drm_hash_item hash;
struct list_head list;
enum drm_object_type type;
atomic_t refcount;
int shareable;
struct drm_file *owner;
void (*ref_struct_locked) (struct drm_file *priv,
struct drm_user_object *obj,
enum drm_ref_type ref_action);
void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
enum drm_ref_type unref_action);
void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
};
/*
* A ref object is a structure used to keep track of references to user
* objects so that they can be destroyed, for example when the user space
* process exits. Designed to be accessible using a pointer to the _user_ object.
*/
struct drm_ref_object {
struct drm_hash_item hash;
struct list_head list;
atomic_t refcount;
enum drm_ref_type unref_action;
};
#define DRM_FENCE_FLAG_EMIT 0x00000001
#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
/**
* Must be called with the struct_mutex held.
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
#define DRM_FENCE_FLAG_NO_USER 0x00000010
/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER 0xFF000000
#define DRM_FENCE_TYPE_EXE 0x00000001
struct drm_fence_arg {
unsigned int handle;
unsigned int fence_class;
unsigned int type;
unsigned int flags;
unsigned int signaled;
unsigned int error;
unsigned int sequence;
unsigned int pad64;
uint64_t expand_pad[2]; /*Future expansion */
};
/* Buffer permissions, referring to how the GPU uses the buffers.
* These translate to fence types used for the buffers.
* Typically a texture buffer is read, a destination buffer is write and
* a command (batch) buffer is exe. They can be or-ed together.
*/
extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable);
#define DRM_BO_FLAG_READ (1ULL << 0)
#define DRM_BO_FLAG_WRITE (1ULL << 1)
#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* All of the bits related to access mode
*/
#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
/*
* Mask: Never evict this buffer. Not even with force. This type of buffer is only
* available to root and must be manually removed before buffer manager shutdown
* or lock.
* Flags: Acknowledge
*/
#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
* The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/*
* Mask: if set, note that the buffer contents are discardable
* Flags: if set the buffer contents are discardable on migration
*/
#define DRM_BO_FLAG_DISCARDABLE (1ULL << 9)
/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
* with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
* with unsnooped PTEs instead of snooped, by using chipset-specific cache
* flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
* as the eviction to local memory (TTM unbind) on map is just a side effect
* to prevent aggressive cache prefetch from the GPU disturbing the cache
* management that the DRM is doing.
*
* Flags: Acknowledge.
* Buffers allocated with this flag should not be used for suballocators.
* This type may have issues on CPUs with over-aggressive caching; see
* http://marc.info/?l=linux-kernel&m=102376926732464&w=2
*/
#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE (1ULL << 15)
/*
* Buffer has been mapped or touched since creation.
* For VRAM we don't need to migrate; just fill with 0s if it is non-dirty.
*/
#define DRM_BO_FLAG_CLEAN (1ULL << 16)
/*
* Memory type flags that can be or'ed together in the mask, but only
* one appears in flags.
*/
/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */
/*
* This is a mask covering all of the memory type flags; easier to just
* use a single constant than a bunch of | values. It covers
* DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
*/
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
/*
* This adds all of the CPU-mapping options in with the memory
* type to label all bits which change how the page gets mapped
*/
#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
DRM_BO_FLAG_CACHED_MAPPED | \
DRM_BO_FLAG_CACHED | \
DRM_BO_FLAG_MAPPABLE)
/* Driver-private flags */
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
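/*
* Illustrative example of the mask/flags convention above (not part of the
* original patch): a client that needs read access and a mappable placement,
* and can live with the buffer in either TT or VRAM, passes a mask like
*
*   DRM_BO_FLAG_READ | DRM_BO_FLAG_MAPPABLE |
*   DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM
*
* at validation time; the returned flags then contain exactly one
* DRM_BO_FLAG_MEM_* bit describing where the buffer actually ended up.
*/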
/*
* Don't block on validate and map. Instead, return EBUSY.
*/
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/*
* Don't place this buffer on the unfenced list. This means
* that the buffer will not end up having a fence associated
* with it as a result of this operation
*/
#define DRM_BO_HINT_DONT_FENCE 0x00000004
/**
* Must be called with the struct_mutex held.
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
uint32_t key);
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
/*
* Must be called with the struct_mutex held. May temporarily release it.
* The client has computed relocations referring to this buffer using the
* offset in the presumed_offset field. If that offset ends up matching
* where this buffer lands, the kernel is free to skip executing those
* relocations.
*/
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
extern int drm_add_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
/*
* Must be called with the struct_mutex held.
*/
#define DRM_BO_MEM_TYPES 8 /* For now. */
struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type,
struct drm_user_object **object);
extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type);
/***************************************************
* Fence objects. (drm_fence.c)
*/
struct drm_fence_object {
struct drm_user_object base;
struct drm_device *dev;
atomic_t usage;
@ -156,7 +258,6 @@ struct drm_fence_object {
};
#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00
struct drm_fence_class_manager {
struct list_head ring;
@ -471,7 +572,6 @@ enum drm_bo_type {
struct drm_buffer_object {
struct drm_device *dev;
struct drm_user_object base;
/*
* If there is a possibility that the usage variable is zero,
@ -547,7 +647,7 @@ struct drm_mem_type_manager {
};
struct drm_bo_lock {
struct drm_user_object base;
// struct drm_user_object base;
wait_queue_head_t queue;
atomic_t write_lock_pending;
atomic_t readers;
@ -561,6 +661,9 @@ struct drm_bo_lock {
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
#define _DRM_BM_ALLOCATOR_CACHED 0x0
#define _DRM_BM_ALLOCATOR_UNCACHED 0x1
struct drm_buffer_manager {
struct drm_bo_lock bm_lock;
struct mutex evict_mutex;
@ -579,6 +682,7 @@ struct drm_buffer_manager {
unsigned long cur_pages;
atomic_t count;
struct page *dummy_read_page;
int allocator_type;
};
struct drm_bo_driver {
@ -656,22 +760,10 @@ struct drm_bo_driver {
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class);
extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
@ -681,6 +773,8 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
extern int drm_bo_add_user_object(struct drm_file *file_priv,
struct drm_buffer_object *bo, int shareable);
extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
@ -706,30 +800,27 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_c
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size,
int kern_init);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle,
int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
struct drm_bo_info_rep *rep);
extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
extern void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait);
/*
* Buffer object memory move- and map helpers.
* drm_bo_move.c
*/
extern int drm_bo_add_ttm(struct drm_buffer_object *bo);
extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
int evict, int no_wait,
struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_zero(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
int evict, int no_wait,
uint32_t fence_class, uint32_t fence_type,
@ -763,8 +854,6 @@ extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
unsigned long dst_offset,
unsigned long *pfn,
pgprot_t *prot);
extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
struct drm_bo_info_rep *rep);
/*
@ -805,22 +894,18 @@ extern void drm_regs_init(struct drm_reg_manager *manager,
const void *),
void (*reg_destroy)(struct drm_reg *));
extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void **virtual);
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void *virtual);
/*
* drm_bo_lock.c
* Simple replacement for the hardware lock on buffer manager init and clean.
* drm_uncached.c
*/
extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock,
int interruptible);
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
int interruptible,
struct drm_file *file_priv);
extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
struct drm_file *file_priv);
extern int drm_uncached_init(void);
extern void drm_uncached_fini(void);
extern struct page *drm_get_uncached_page(void);
extern void drm_put_uncached_page(struct page *page);
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \


@ -172,6 +172,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_master *master = minor->master;
struct drm_device *dev = minor->dev;
int len = 0;
@ -180,13 +181,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
return 0;
}
if (!master)
return 0;
*start = &buf[offset];
*eof = 0;
if (dev->unique) {
if (master->unique) {
DRM_PROC_PRINT("%s %s %s\n",
dev->driver->pci_driver.name,
pci_name(dev->pdev), dev->unique);
pci_name(dev->pdev), master->unique);
} else {
DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
pci_name(dev->pdev));


@ -58,6 +58,14 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
int ret;
int base = 0, limit = 63;
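/* Carve up the minor number space: legacy (card) nodes use 0-63, control
* nodes are shifted up by 64 and render nodes by 128. (Comment added for
* clarity; not in the original patch.)
*/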
if (type == DRM_MINOR_CONTROL) {
base += 64;
limit = base + 127;
} else if (type == DRM_MINOR_RENDER) {
base += 128;
limit = base + 255;
}
again:
if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
@ -80,6 +88,103 @@ again:
return new_id;
}
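/*
* Descriptive note (added for clarity, not in the original patch):
* drm_master_create() below allocates a refcounted per-master container
* holding the heavyweight lock, the authentication magic hash/list and,
* once set, the unique bus id string; the new master is appended to the
* minor's master_list.
*/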
struct drm_master *drm_master_create(struct drm_minor *minor)
{
struct drm_master *master;
master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER);
if (!master)
return NULL;
kref_init(&master->refcount);
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
list_add_tail(&master->head, &minor->master_list);
return master;
}
struct drm_master *drm_master_get(struct drm_master *master)
{
kref_get(&master->refcount);
return master;
}
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_magic_entry *pt, *next;
struct drm_device *dev = master->minor->dev;
list_del(&master->head);
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
if (master->unique) {
drm_free(master->unique, strlen(master->unique) + 1, DRM_MEM_DRIVER);
master->unique = NULL;
master->unique_len = 0;
}
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
drm_ht_remove(&master->magiclist);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL; /* SHM removed */
master->lock.file_priv = NULL;
wake_up_interruptible(&master->lock.lock_queue);
}
drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
}
void drm_master_put(struct drm_master **master)
{
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
return -EINVAL;
if (!file_priv->master)
return -EINVAL;
if (!file_priv->minor->master && file_priv->minor->master != file_priv->master) {
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
mutex_unlock(&dev->struct_mutex);
}
return 0;
}
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!file_priv->master)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
drm_master_put(&file_priv->minor->master);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
@ -94,7 +199,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
@ -112,21 +217,14 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
dev->irq = pdev->irq;
dev->irq_enabled = 0;
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
return -ENOMEM;
}
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}
/* the DRM has 6 counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
@ -213,6 +311,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
new_minor->device = MKDEV(DRM_MAJOR, minor_id);
new_minor->dev = dev;
new_minor->index = minor_id;
INIT_LIST_HEAD(&new_minor->master_list);
idr_replace(&drm_minors_idr, new_minor, minor_id);
@ -299,20 +398,32 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
}
/* only add the control node on a modesetting platform */
if (drm_core_check_feature(dev, DRIVER_MODESET))
if ((ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL)))
goto err_g3;
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
goto err_g4;
if (dev->driver->load)
if ((ret = dev->driver->load(dev, ent->driver_data)))
goto err_g4;
goto err_g5;
/* setup the grouping for the legacy output */
if (drm_core_check_feature(dev, DRIVER_MODESET))
if (drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group))
goto err_g5;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary->index);
return 0;
err_g5:
drm_put_minor(&dev->primary);
err_g4:
drm_put_minor(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g3:
if (!drm_fb_loaded)
pci_disable_device(pdev);
@ -344,11 +455,6 @@ int drm_put_dev(struct drm_device * dev)
{
DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname) + 1,
DRM_MEM_DRIVER);
@ -368,15 +474,14 @@ int drm_put_dev(struct drm_device * dev)
* last minor released.
*
*/
int drm_put_minor(struct drm_device *dev)
int drm_put_minor(struct drm_minor **minor_p)
{
struct drm_minor **minor_p = &dev->primary;
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
if (minor->type == DRM_MINOR_LEGACY) {
if (dev->driver->proc_cleanup)
dev->driver->proc_cleanup(minor);
if (minor->dev->driver->proc_cleanup)
minor->dev->driver->proc_cleanup(minor);
drm_proc_cleanup(minor, drm_proc_root);
}
drm_sysfs_device_remove(minor);
@ -387,3 +492,5 @@ int drm_put_minor(struct drm_device *dev)
*minor_p = NULL;
return 0;
}
EXPORT_SYMBOL(drm_put_minor);


@ -36,8 +36,9 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
printk(KERN_ERR "%s\n", __FUNCTION__);
if (drm_dev->driver->suspend)
return drm_dev->driver->suspend(drm_dev, state);
if (drm_minor->type == DRM_MINOR_CONTROL)
if (drm_dev->driver->suspend)
return drm_dev->driver->suspend(drm_dev, state);
return 0;
}
@ -54,8 +55,9 @@ static int drm_sysfs_resume(struct device *dev)
struct drm_minor *drm_minor = to_drm_minor(dev);
struct drm_device *drm_dev = drm_minor->dev;
if (drm_dev->driver->resume)
return drm_dev->driver->resume(drm_dev);
if (drm_minor->type == DRM_MINOR_CONTROL)
if (drm_dev->driver->resume)
return drm_dev->driver->resume(drm_dev);
return 0;
}
@ -131,10 +133,6 @@ static ssize_t show_dri(struct device *device, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
}
static struct device_attribute device_attrs[] = {
__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
};
/**
* drm_sysfs_device_release - do nothing
* @dev: Linux device
@ -148,6 +146,316 @@ static void drm_sysfs_device_release(struct device *dev)
return;
}
/*
* Connector properties
*/
static ssize_t status_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = container_of(device, struct drm_connector, kdev);
return snprintf(buf, PAGE_SIZE, "%s",
drm_get_connector_status_name(connector->funcs->detect(connector)));
}
static ssize_t dpms_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = container_of(device, struct drm_connector, kdev);
struct drm_device *dev = connector->dev;
uint64_t dpms_status;
int ret;
ret = drm_connector_property_get_value(connector,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
return 0;
return snprintf(buf, PAGE_SIZE, "%s", drm_get_dpms_name((int)dpms_status));
}
static ssize_t enabled_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = container_of(device, struct drm_connector, kdev);
if (connector->encoder)
return snprintf(buf, PAGE_SIZE, "enabled");
else
return snprintf(buf, PAGE_SIZE, "disabled");
}
static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = container_of(connector_dev, struct drm_connector,
kdev);
unsigned char *edid;
size_t size;
if (!connector->edid_blob_ptr)
return 0;
edid = connector->edid_blob_ptr->data;
size = connector->edid_blob_ptr->length;
if (!edid)
return 0;
if (off >= size)
return 0;
if (off + count > size)
count = size - off;
memcpy(buf, edid + off, count);
return count;
}
static ssize_t modes_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = container_of(device, struct drm_connector, kdev);
struct drm_display_mode *mode;
int written = 0;
list_for_each_entry(mode, &connector->modes, head) {
written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
mode->name);
}
return written;
}
static ssize_t subconnector_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = container_of(device, struct drm_connector, kdev);
struct drm_device *dev = connector->dev;
struct drm_property *prop = NULL;
uint64_t subconnector;
int ret;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
prop = dev->mode_config.dvi_i_subconnector_property;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
prop = dev->mode_config.tv_subconnector_property;
break;
default:
DRM_ERROR("Wrong connector type for this property\n");
return 0;
}
if (!prop) {
DRM_ERROR("Unable to find subconnector property\n");
return 0;
}
ret = drm_connector_property_get_value(connector, prop, &subconnector);
if (ret)
return 0;
return snprintf(buf, PAGE_SIZE, "%s", drm_get_subconnector_name((int)subconnector));
}
static ssize_t select_subconnector_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct drm_connector *connector = container_of(device, struct drm_connector, kdev);
struct drm_device *dev = connector->dev;
struct drm_property *prop = NULL;
uint64_t subconnector;
int ret;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
prop = dev->mode_config.dvi_i_select_subconnector_property;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
prop = dev->mode_config.tv_select_subconnector_property;
break;
default:
DRM_ERROR("Wrong connector type for this property\n");
return 0;
}
if (!prop) {
DRM_ERROR("Unable to find select subconnector property\n");
return 0;
}
ret = drm_connector_property_get_value(connector, prop, &subconnector);
if (ret)
return 0;
return snprintf(buf, PAGE_SIZE, "%s", drm_get_select_subconnector_name((int)subconnector));
}
static struct device_attribute connector_attrs[] = {
__ATTR_RO(status),
__ATTR_RO(enabled),
__ATTR_RO(dpms),
__ATTR_RO(modes),
};
/* These attributes are for both DVI-I connectors and all types of tv-out. */
static struct device_attribute connector_attrs_opt1[] = {
__ATTR_RO(subconnector),
__ATTR_RO(select_subconnector),
};
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.size = 128,
.read = edid_show,
};
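/*
* Illustrative note (not part of the original patch): together with the bin
* attribute above, every registered connector ends up exposing status,
* enabled, dpms, modes and edid files under its sysfs device, e.g.
* /sys/class/drm/card0-<connector name>/status, where the exact name comes
* from drm_get_connector_name() below.
*/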
/**
* drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
*
* Create a connector device in sysfs, along with its associated connector
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
*
* Note:
* This routine should only be called *once* for each connector registered.
* A second call for an already registered device will trigger the BUG_ON
* below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
int ret = 0, i, j;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
connector->kdev.parent = &dev->primary->kdev;
connector->kdev.class = drm_class;
connector->kdev.release = drm_sysfs_device_release;
DRM_DEBUG("adding \"%s\" to sysfs\n",
drm_get_connector_name(connector));
snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
dev->primary->index, drm_get_connector_name(connector));
ret = device_register(&connector->kdev);
if (ret) {
DRM_ERROR("failed to register connector device: %d\n", ret);
goto out;
}
/* Standard attributes */
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
ret = device_create_file(&connector->kdev, &connector_attrs[i]);
if (ret)
goto err_out_files;
}
/* Optional attributes */
/* In the long run it may be a good idea to make one set of optionals per connector type. */
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
if (ret)
goto err_out_files;
}
break;
default:
break;
}
ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
if (ret)
goto err_out_files;
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(dev);
return 0;
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
device_remove_file(&connector->kdev, &connector_attrs[j]);
device_unregister(&connector->kdev);
out:
return ret;
}
EXPORT_SYMBOL(drm_sysfs_connector_add);
/**
* drm_sysfs_connector_remove - remove a connector device from sysfs
* @connector: connector to remove
*
* Remove @connector and its associated attributes from sysfs. Note that
* the device model core will take care of sending the "remove" uevent
* at this time, so we don't need to do it.
*
* Note:
* This routine should only be called if the connector was previously
* successfully registered. If @connector hasn't been registered yet,
* you'll likely see a panic somewhere deep in sysfs code when called.
*/
void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
DRM_DEBUG("removing \"%s\" from sysfs\n",
drm_get_connector_name(connector));
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
device_remove_file(&connector->kdev, &connector_attrs[i]);
sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
device_unregister(&connector->kdev);
}
EXPORT_SYMBOL(drm_sysfs_connector_remove);
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device
*
* Send a uevent for the DRM device specified by @dev. Currently we only
* set HOTPLUG=1 in the uevent environment, but this could be expanded to
* deal with other types of events.
*/
void drm_sysfs_hotplug_event(struct drm_device *dev)
{
char *event_string = "HOTPLUG=1";
char *envp[] = { event_string, NULL };
DRM_DEBUG("generating hotplug event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
static struct device_attribute dri_attrs[] = {
__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
};
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@ -156,6 +464,11 @@ static void drm_sysfs_device_release(struct device *dev)
* Add a DRM device to the DRM's device model class. We use @dev's PCI device
* as the parent for the Linux device, and make sure it has a file containing
* the driver we're using (for userspace compatibility).
*
* Note:
* This routine should only be called *once* for each DRM minor registered.
* A second call for an already registered device will trigger the BUG_ON
* below.
*/
int drm_sysfs_device_add(struct drm_minor *minor)
{
@ -167,18 +480,28 @@ int drm_sysfs_device_add(struct drm_minor *minor)
minor->kdev.class = drm_class;
minor->kdev.release = drm_sysfs_device_release;
minor->kdev.devt = minor->device;
minor_str = "card%d";
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
else if (minor->type == DRM_MINOR_RENDER)
minor_str = "renderD%d";
else
minor_str = "card%d";
snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
/* Shouldn't register more than once */
BUG_ON(device_is_registered(&minor->kdev));
DRM_DEBUG("registering DRM device \"%s\"\n", minor->kdev.bus_id);
err = device_register(&minor->kdev);
if (err) {
DRM_ERROR("device add failed: %d\n", err);
goto err_out;
}
for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
err = device_create_file(&minor->kdev, &device_attrs[i]);
for (i = 0; i < ARRAY_SIZE(dri_attrs); i++) {
err = device_create_file(&minor->kdev, &dri_attrs[i]);
if (err)
goto err_out_files;
}
@ -188,7 +511,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
device_remove_file(&minor->kdev, &device_attrs[j]);
device_remove_file(&minor->kdev, &dri_attrs[j]);
device_unregister(&minor->kdev);
err_out:
@ -206,7 +529,7 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
{
int i;
for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
device_remove_file(&minor->kdev, &device_attrs[i]);
for (i = 0; i < ARRAY_SIZE(dri_attrs); i++)
device_remove_file(&minor->kdev, &dri_attrs[i]);
device_unregister(&minor->kdev);
}


@ -120,14 +120,18 @@ static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
ttm->pages = NULL;
}
static struct page *drm_ttm_alloc_page(void)
static struct page *drm_ttm_alloc_page(struct drm_ttm *ttm)
{
struct page *page;
if (drm_alloc_memctl(PAGE_SIZE))
return NULL;
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
page = drm_get_uncached_page();
else
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return NULL;
@ -149,6 +153,9 @@ static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
struct page **cur_page;
int do_tlbflush = 0;
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
return 0;
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
return 0;
@ -215,14 +222,18 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (*cur_page) {
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
drm_put_uncached_page(*cur_page);
else {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
ClearPageReserved(*cur_page);
#endif
if (page_count(*cur_page) != 1)
DRM_ERROR("Erroneous page count. Leaking pages.\n");
if (page_mapped(*cur_page))
DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
__free_page(*cur_page);
}
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
}
@ -268,7 +279,7 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
struct drm_buffer_manager *bm = &ttm->dev->bm;
while(NULL == (p = ttm->pages[index])) {
p = drm_ttm_alloc_page();
p = drm_ttm_alloc_page(ttm);
if (!p)
return NULL;

138 linux-core/drm_uncached.c Normal file

@ -0,0 +1,138 @@
/*
* Copyright (c) Red Hat Inc.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie <airlied@redhat.com>
*/
/* simple list based uncached page allocator
* - Add chunks of 1MB to the allocator at a time.
* - Use page->lru to keep a free list
* - doesn't track currently in use pages
*
* TODO: Add shrinker support
*/
#include "drmP.h"
#include <asm/agp.h>
static struct list_head uncached_free_list;
static struct mutex uncached_mutex;
static int uncached_inited;
static int total_uncached_pages;
/* add 1MB at a time */
#define NUM_PAGES_TO_ADD 256
static void drm_uncached_page_put(struct page *page)
{
unmap_page_from_agp(page);
put_page(page);
__free_page(page);
}
int drm_uncached_add_pages_locked(int num_pages)
{
struct page *page;
int i;
DRM_DEBUG("adding uncached memory %ld\n", num_pages * PAGE_SIZE);
for (i = 0; i < num_pages; i++) {
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
DRM_ERROR("unable to get page %d\n", i);
return i;
}
get_page(page);
#ifdef CONFIG_X86
set_memory_wc((unsigned long)page_address(page), 1);
#else
map_page_into_agp(page);
#endif
list_add(&page->lru, &uncached_free_list);
total_uncached_pages++;
}
return i;
}
struct page *drm_get_uncached_page(void)
{
struct page *page = NULL;
int ret;
mutex_lock(&uncached_mutex);
if (list_empty(&uncached_free_list)) {
ret = drm_uncached_add_pages_locked(NUM_PAGES_TO_ADD);
if (ret == 0) {
mutex_unlock(&uncached_mutex);
return NULL;
}
}
page = list_first_entry(&uncached_free_list, struct page, lru);
list_del(&page->lru);
mutex_unlock(&uncached_mutex);
return page;
}
void drm_put_uncached_page(struct page *page)
{
mutex_lock(&uncached_mutex);
list_add(&page->lru, &uncached_free_list);
mutex_unlock(&uncached_mutex);
}
void drm_uncached_release_all_pages(void)
{
struct page *page, *tmp;
list_for_each_entry_safe(page, tmp, &uncached_free_list, lru) {
list_del(&page->lru);
drm_uncached_page_put(page);
}
}
int drm_uncached_init(void)
{
if (uncached_inited)
return 0;
INIT_LIST_HEAD(&uncached_free_list);
mutex_init(&uncached_mutex);
uncached_inited = 1;
return 0;
}
void drm_uncached_fini(void)
{
if (!uncached_inited)
return;
uncached_inited = 0;
drm_uncached_release_all_pages();
}
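A minimal sketch of the intended call pattern (editor's illustration, not part of this file; the helper name and error handling are assumptions). It mirrors what the drm_ttm.c hunk above does: pages come from and return to the allocator's free list, with drm_uncached_init()/drm_uncached_fini() bracketing everything at driver load and unload.
/* Hypothetical caller: fill an array with uncached pages, unwinding on failure. */
static int example_fill_uncached(struct page **pages, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		pages[i] = drm_get_uncached_page();
		if (!pages[i])
			goto unwind;
	}
	return 0;

unwind:
	/* Returned pages go back on the allocator's free list, not to the kernel. */
	while (--i >= 0)
		drm_put_uncached_page(pages[i]);
	return -ENOMEM;
}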


@ -715,13 +715,9 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma,
unsigned long ret = VM_FAULT_NOPAGE;
dev = bo->dev;
err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (err)
return VM_FAULT_NOPAGE;
err = mutex_lock_interruptible(&bo->mutex);
if (err) {
drm_bo_read_unlock(&dev->bm.bm_lock);
return VM_FAULT_NOPAGE;
}
@ -788,7 +784,6 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma,
out_unlock:
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
mutex_unlock(&bo->mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
}
#endif
@ -797,6 +792,10 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
/* clear the clean flags */
bo->mem.flags &= ~DRM_BO_FLAG_CLEAN;
bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN;
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT

159 linux-core/dvo.h Normal file

@ -0,0 +1,159 @@
/*
* Copyright © 2006 Eric Anholt
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#ifndef _INTEL_DVO_H
#define _INTEL_DVO_H
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "intel_drv.h"
struct intel_dvo_device {
char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
struct intel_i2c_chan *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
struct drm_display_mode *panel_fixed_mode;
bool panel_wants_dither;
};
struct intel_dvo_dev_ops {
/*
* Initialize the device at startup time.
* Returns NULL if the device does not exist.
*/
bool (*init)(struct intel_dvo_device *dvo,
struct intel_i2c_chan *i2cbus);
/*
* Called to allow the output a chance to create properties after the
* RandR objects have been created.
*/
void (*create_resources)(struct intel_dvo_device *dvo);
/*
* Turn on/off output or set intermediate power levels if available.
*
* Unsupported intermediate modes drop to the lower power setting.
* If the mode is DPMSModeOff, the output must be disabled,
* as the DPLL may be disabled afterwards.
*/
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
* Saves the output's state for restoration on VT switch.
*/
void (*save)(struct intel_dvo_device *dvo);
/*
* Restores the output's state at VT switch.
*/
void (*restore)(struct intel_dvo_device *dvo);
/*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
* be supported on the output specifically, and not represent
* generic CRTC limitations.
*
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
int (*mode_valid)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode);
/*
* Callback to adjust the mode to be set in the CRTC.
*
* This allows an output to adjust the clock or even the entire set of
* timings, which is used for panels with fixed timings or for
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
/*
* Callback for committing mode changes on an output
*/
void (*commit)(struct intel_dvo_device *dvo);
/*
* Callback for setting up a video mode after fixups have been made.
*
* This is only called while the output is disabled. The dpms callback
* must be all that's necessary for the output, to turn the output on
* after this function is called.
*/
void (*mode_set)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
* Probe for a connected output, and return detect_status.
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
/**
* Query the device for the modes it provides.
*
* This function may also update MonInfo, mm_width, and mm_height.
*
* \return singly-linked list of modes or NULL if no modes found.
*/
struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
#ifdef RANDR_12_INTERFACE
/**
* Callback when an output's property has changed.
*/
bool (*set_property)(struct intel_dvo_device *dvo,
struct drm_property *property, uint64_t val);
#endif
/**
* Clean up driver-specific bits of the output
*/
void (*destroy) (struct intel_dvo_device *dvo);
/**
* Debugging hook to dump device registers to log file
*/
void (*dump_regs)(struct intel_dvo_device *dvo);
};
#endif /* _INTEL_DVO_H */

454 linux-core/dvo_ch7017.c Normal file

@ -0,0 +1,454 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "dvo.h"
#define CH7017_TV_DISPLAY_MODE 0x00
#define CH7017_FLICKER_FILTER 0x01
#define CH7017_VIDEO_BANDWIDTH 0x02
#define CH7017_TEXT_ENHANCEMENT 0x03
#define CH7017_START_ACTIVE_VIDEO 0x04
#define CH7017_HORIZONTAL_POSITION 0x05
#define CH7017_VERTICAL_POSITION 0x06
#define CH7017_BLACK_LEVEL 0x07
#define CH7017_CONTRAST_ENHANCEMENT 0x08
#define CH7017_TV_PLL 0x09
#define CH7017_TV_PLL_M 0x0a
#define CH7017_TV_PLL_N 0x0b
#define CH7017_SUB_CARRIER_0 0x0c
#define CH7017_CIV_CONTROL 0x10
#define CH7017_CIV_0 0x11
#define CH7017_CHROMA_BOOST 0x14
#define CH7017_CLOCK_MODE 0x1c
#define CH7017_INPUT_CLOCK 0x1d
#define CH7017_GPIO_CONTROL 0x1e
#define CH7017_INPUT_DATA_FORMAT 0x1f
#define CH7017_CONNECTION_DETECT 0x20
#define CH7017_DAC_CONTROL 0x21
#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
#define CH7017_DEFEAT_VSYNC 0x47
#define CH7017_TEST_PATTERN 0x48
#define CH7017_POWER_MANAGEMENT 0x49
/** Enables the TV output path. */
#define CH7017_TV_EN (1 << 0)
#define CH7017_DAC0_POWER_DOWN (1 << 1)
#define CH7017_DAC1_POWER_DOWN (1 << 2)
#define CH7017_DAC2_POWER_DOWN (1 << 3)
#define CH7017_DAC3_POWER_DOWN (1 << 4)
/** Powers down the TV out block, and DAC0-3 */
#define CH7017_TV_POWER_DOWN_EN (1 << 5)
#define CH7017_VERSION_ID 0x4a
#define CH7017_DEVICE_ID 0x4b
#define CH7017_DEVICE_ID_VALUE 0x1b
#define CH7018_DEVICE_ID_VALUE 0x1a
#define CH7019_DEVICE_ID_VALUE 0x19
#define CH7017_XCLK_D2_ADJUST 0x53
#define CH7017_UP_SCALER_COEFF_0 0x55
#define CH7017_UP_SCALER_COEFF_1 0x56
#define CH7017_UP_SCALER_COEFF_2 0x57
#define CH7017_UP_SCALER_COEFF_3 0x58
#define CH7017_UP_SCALER_COEFF_4 0x59
#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
#define CH7017_GPIO_INVERT 0x5c
#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
/**< Low bits of horizontal active pixel input */
#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
/** High bits of horizontal active pixel input */
#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
/** High bits of vertical active line output */
#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
/**< Low bits of vertical active line output */
#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
/**< Low bits of horizontal active pixel output */
#define CH7017_LVDS_POWER_DOWN 0x63
/** High bits of horizontal active pixel output */
#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
/** Enables the LVDS power down state transition */
#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
/** Enables the LVDS upscaler */
#define CH7017_LVDS_UPSCALER_EN (1 << 7)
#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
#define CH7017_LVDS_ENCODING 0x64
#define CH7017_LVDS_DITHER_2D (1 << 2)
#define CH7017_LVDS_DITHER_DIS (1 << 3)
#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
#define CH7017_LVDS_24_BIT (1 << 5)
#define CH7017_LVDS_ENCODING_2 0x65
#define CH7017_LVDS_PLL_CONTROL 0x66
/** Enables the LVDS panel output path */
#define CH7017_LVDS_PANEN (1 << 0)
/** Enables the LVDS panel backlight */
#define CH7017_LVDS_BKLEN (1 << 3)
#define CH7017_POWER_SEQUENCING_T1 0x67
#define CH7017_POWER_SEQUENCING_T2 0x68
#define CH7017_POWER_SEQUENCING_T3 0x69
#define CH7017_POWER_SEQUENCING_T4 0x6a
#define CH7017_POWER_SEQUENCING_T5 0x6b
#define CH7017_GPIO_DRIVER_TYPE 0x6c
#define CH7017_GPIO_DATA 0x6d
#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
# define CH7017_LVDS_PLL_VCO_SHIFT 4
# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
#define CH7017_OUTPUTS_ENABLE 0x73
# define CH7017_CHARGE_PUMP_LOW 0x0
# define CH7017_CHARGE_PUMP_HIGH 0x3
# define CH7017_LVDS_CHANNEL_A (1 << 3)
# define CH7017_LVDS_CHANNEL_B (1 << 4)
# define CH7017_TV_DAC_A (1 << 5)
# define CH7017_TV_DAC_B (1 << 6)
# define CH7017_DDC_SELECT_DC2 (1 << 7)
#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
#define CH7017_LVDS_CONTROL_2 0x78
# define CH7017_LOOP_FILTER_SHIFT 5
# define CH7017_PHASE_DETECTOR_SHIFT 0
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
uint8_t save_hapi;
uint8_t save_vali;
uint8_t save_valo;
uint8_t save_ailo;
uint8_t save_lvds_pll_vco;
uint8_t save_feedback_div;
uint8_t save_lvds_control_2;
uint8_t save_outputs_enable;
uint8_t save_lvds_power_down;
uint8_t save_power_management;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
{
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = i2cbus->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
*val= in_buf[0];
return true;
};
return false;
}
static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
{
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = val;
if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
return true;
return false;
}
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct intel_i2c_chan *i2cbus)
{
struct ch7017_priv *priv;
uint8_t val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
if (priv == NULL)
return false;
dvo->i2c_bus = i2cbus;
dvo->i2c_bus->slave_addr = dvo->slave_addr;
dvo->dev_priv = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
goto fail;
if (val != CH7017_DEVICE_ID_VALUE &&
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
val, i2cbus->adapter.name,i2cbus->slave_addr);
goto fail;
}
return true;
fail:
kfree(priv);
return false;
}
static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
{
return connector_status_unknown;
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 160000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static void ch7017_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
uint8_t outputs_enable, lvds_control_2, lvds_power_down;
uint8_t horizontal_active_pixel_input;
uint8_t horizontal_active_pixel_output, vertical_active_line_output;
uint8_t active_input_line_output;
DRM_DEBUG("Registers before mode setting\n");
ch7017_dump_regs(dvo);
/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
if (mode->clock < 100000) {
outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
(13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
(3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
(0 << CH7017_PHASE_DETECTOR_SHIFT);
} else {
outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
(3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
lvds_pll_feedback_div = 35;
lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
(0 << CH7017_PHASE_DETECTOR_SHIFT);
if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
outputs_enable |= CH7017_LVDS_CHANNEL_B;
lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
} else {
lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
(1 << CH7017_LVDS_PLL_VCO_SHIFT) |
(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
}
}
horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
vertical_active_line_output = mode->vdisplay & 0x00ff;
horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
(((mode->vdisplay & 0x0700) >> 8) << 3);
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
horizontal_active_pixel_output);
ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
vertical_active_line_output);
ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
active_input_line_output);
ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
/* Turn the LVDS back on with new settings. */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
DRM_DEBUG("Registers after mode setting\n");
ch7017_dump_regs(dvo);
}
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
{
uint8_t val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
/* Turn off TV/VGA, and never turn it on since we don't support it. */
ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
CH7017_DAC0_POWER_DOWN |
CH7017_DAC1_POWER_DOWN |
CH7017_DAC2_POWER_DOWN |
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
if (mode == DRM_MODE_DPMS_ON) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
} else {
/* Turn off the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val | CH7017_LVDS_POWER_DOWN_EN);
}
/* XXX: Should actually wait for update power status somehow */
udelay(20000);
}
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
#define DUMP(reg) \
do { \
ch7017_read(dvo, reg, &val); \
DRM_DEBUG(#reg ": %02x\n", val); \
} while (0)
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
DUMP(CH7017_LVDS_CONTROL_2);
DUMP(CH7017_OUTPUTS_ENABLE);
DUMP(CH7017_LVDS_POWER_DOWN);
}
static void ch7017_save(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
}
static void ch7017_restore(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
/* Power down before changing mode */
ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
}
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
if (priv) {
kfree(priv);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ch7017_ops = {
.init = ch7017_init,
.detect = ch7017_detect,
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
.save = ch7017_save,
.restore = ch7017_restore,
.destroy = ch7017_destroy,
};

368 linux-core/dvo_ch7xxx.c Normal file

@ -0,0 +1,368 @@
/**************************************************************************
Copyright © 2006 Dave Airlie
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#define CH7xxx_REG_VID 0x4a
#define CH7xxx_REG_DID 0x4b
#define CH7011_VID 0x83 /* 7010 as well */
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
#define CH7xxx_NUM_REGS 0x4c
#define CH7xxx_CM 0x1c
#define CH7xxx_CM_XCM (1<<0)
#define CH7xxx_CM_MCP (1<<2)
#define CH7xxx_INPUT_CLOCK 0x1d
#define CH7xxx_GPIO 0x1e
#define CH7xxx_GPIO_HPIR (1<<3)
#define CH7xxx_IDF 0x1f
#define CH7xxx_IDF_HSP (1<<3)
#define CH7xxx_IDF_VSP (1<<4)
#define CH7xxx_CONNECTION_DETECT 0x20
#define CH7xxx_CDET_DVI (1<<5)
#define CH7301_DAC_CNTL 0x21
#define CH7301_HOTPLUG 0x23
#define CH7xxx_TCTL 0x31
#define CH7xxx_TVCO 0x32
#define CH7xxx_TPCP 0x33
#define CH7xxx_TPD 0x34
#define CH7xxx_TPVT 0x35
#define CH7xxx_TLPF 0x36
#define CH7xxx_TCT 0x37
#define CH7301_TEST_PATTERN 0x48
#define CH7xxx_PM 0x49
#define CH7xxx_PM_FPD (1<<0)
#define CH7301_PM_DACPD0 (1<<1)
#define CH7301_PM_DACPD1 (1<<2)
#define CH7301_PM_DACPD2 (1<<3)
#define CH7xxx_PM_DVIL (1<<6)
#define CH7xxx_PM_DVIP (1<<7)
#define CH7301_SYNC_POLARITY 0x56
#define CH7301_SYNC_RGB_YUV (1<<0)
#define CH7301_SYNC_POL_DVI (1<<5)
/** @file
* driver for the Chrontel 7xxx DVI chip over DVO.
*/
static struct ch7xxx_id_struct {
uint8_t vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
struct ch7xxx_reg_state {
uint8_t regs[CH7xxx_NUM_REGS];
};
struct ch7xxx_priv {
bool quiet;
struct ch7xxx_reg_state save_reg;
struct ch7xxx_reg_state mode_reg;
uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
static void ch7xxx_save(struct intel_dvo_device *dvo);
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
if (ch7xxx_ids[i].vid == vid)
return ch7xxx_ids[i].name;
}
return NULL;
}
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = i2cbus->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
/** Writes an 8 bit register */
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
struct intel_i2c_chan *i2cbus)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
char *name;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
return false;
dvo->i2c_bus = i2cbus;
dvo->i2c_bus->slave_addr = dvo->slave_addr;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
goto out;
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
vendor, i2cbus->adapter.name, i2cbus->slave_addr);
goto out;
}
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
if (device != CH7xxx_DID) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
device, i2cbus->adapter.name, i2cbus->slave_addr);
goto out;
}
ch7xxx->quiet = false;
DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
name, vendor, device);
return true;
out:
kfree(ch7xxx);
return false;
}
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
uint8_t cdet, orig_pm, pm;
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
pm = orig_pm;
pm &= ~CH7xxx_PM_FPD;
pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
ch7xxx_writeb(dvo, CH7xxx_PM, pm);
ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
if (cdet & CH7xxx_CDET_DVI)
return connector_status_connected;
return connector_status_disconnected;
}
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint8_t tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
tpcp = 0x08;
tpd = 0x16;
tlpf = 0x60;
} else {
tvco = 0x2d;
tpcp = 0x06;
tpd = 0x26;
tlpf = 0xa0;
}
ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
/* set the CH7xxx power state */
static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
{
if (mode == DRM_MODE_DPMS_ON)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
if ((i % 8) == 0 )
DRM_DEBUG("\n %02X: ", i);
DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
}
}
static void ch7xxx_save(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
}
static void ch7xxx_restore(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
}
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
if (ch7xxx) {
kfree(ch7xxx);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ch7xxx_ops = {
.init = ch7xxx_init,
.detect = ch7xxx_detect,
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
.save = ch7xxx_save,
.restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};

463 linux-core/dvo_ivch.c Normal file

@ -0,0 +1,463 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "dvo.h"
/*
* register definitions for the i82807aa.
*
* Documentation on this chipset can be found in datasheet #29069001 at
* intel.com.
*/
/*
* VCH Revision & GMBus Base Addr
*/
#define VR00 0x00
# define VR00_BASE_ADDRESS_MASK 0x007f
/*
* Functionality Enable
*/
#define VR01 0x01
/*
* Enable the panel fitter
*/
# define VR01_PANEL_FIT_ENABLE (1 << 3)
/*
* Enables the LCD display.
*
* This must not be set while VR01_DVO_BYPASS_ENABLE is set.
*/
# define VR01_LCD_ENABLE (1 << 2)
/** Enables the DVO repeater. */
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
/** Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
/*
* LCD Interface Format
*/
#define VR10 0x10
/** Enables LVDS output instead of CMOS */
# define VR10_LVDS_ENABLE (1 << 4)
/** Enables 18-bit LVDS output. */
# define VR10_INTERFACE_1X18 (0 << 2)
/** Enables 24-bit LVDS or CMOS output */
# define VR10_INTERFACE_1X24 (1 << 2)
/** Enables 2x18-bit LVDS or CMOS output. */
# define VR10_INTERFACE_2X18 (2 << 2)
/** Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
/*
* VR20 LCD Horizontal Display Size
*/
#define VR20 0x20
/*
* LCD Vertical Display Size
*/
#define VR21 0x21
/*
* Panel power down status
*/
#define VR30 0x30
/** Read only bit indicating that the panel is not in a safe poweroff state. */
# define VR30_PANEL_ON (1 << 15)
#define VR40 0x40
# define VR40_STALL_ENABLE (1 << 13)
# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
# define VR40_AUTO_RATIO_ENABLE (1 << 9)
# define VR40_CLOCK_GATING_ENABLE (1 << 8)
/*
* Panel Fitting Vertical Ratio
* (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
*/
#define VR41 0x41
/*
* Panel Fitting Horizontal Ratio
* (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
*/
#define VR42 0x42
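/*
 * Worked example (editor's note, not in the original header): fitting an
 * 800x600 image onto a 1024x768 panel gives
 *   VR42 = (((800 - 1) << 16) / (1024 - 1)) >> 2 = 12796
 *   VR41 = (((600 - 1) << 16) / ( 768 - 1)) >> 2 = 12795
 * i.e. roughly (image size / panel size) scaled by 2^14, matching how
 * ivch_mode_set() below programs the panel fitter.
 */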
/*
* Horizontal Image Size
*/
#define VR43 0x43
/* VR80 GPIO 0
*/
#define VR80 0x80
#define VR81 0x81
#define VR82 0x82
#define VR83 0x83
#define VR84 0x84
#define VR85 0x85
#define VR86 0x86
#define VR87 0x87
/* VR88 GPIO 8
*/
#define VR88 0x88
/* Graphics BIOS scratch 0
*/
#define VR8E 0x8E
# define VR8E_PANEL_TYPE_MASK (0xf << 0)
# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
/* Graphics BIOS scratch 1
*/
#define VR8F 0x8F
# define VR8F_VCH_PRESENT (1 << 0)
# define VR8F_DISPLAY_CONN (1 << 1)
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)
struct ivch_priv {
bool quiet;
uint16_t width, height;
uint16_t save_VR01;
uint16_t save_VR40;
};
#if 0
struct vch_capabilities {
struct aimdb_block aimdb_block;
uint8_t panel_type;
uint8_t set_panel_type;
uint8_t slave_address;
uint8_t capabilities;
#define VCH_PANEL_FITTING_SUPPORT (0x3 << 0)
#define VCH_PANEL_FITTING_TEXT (1 << 2)
#define VCH_PANEL_FITTING_GRAPHICS (1 << 3)
#define VCH_PANEL_FITTING_RATIO (1 << 4)
#define VCH_DITHERING (1 << 5)
uint8_t backlight_gpio;
uint8_t set_panel_type_us_gpios;
} __attribute__ ((packed));
#endif
static void ivch_dump_regs(struct intel_dvo_device *dvo);
/**
* Reads a register on the ivch.
*
* Each of the 256 registers are 16 bits long.
*/
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
u8 out_buf[1];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = i2cbus->slave_addr,
.flags = I2C_M_RD,
.len = 0,
},
{
.addr = 0,
.flags = I2C_M_NOSTART,
.len = 1,
.buf = out_buf,
},
{
.addr = i2cbus->slave_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
}
};
out_buf[0] = addr;
if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
};
if (!priv->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
/** Writes a 16-bit register on the ivch */
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
u8 out_buf[3];
struct i2c_msg msg = {
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = data & 0xff;
out_buf[2] = data >> 8;
if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
return true;
if (!priv->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
struct intel_i2c_chan *i2cbus)
{
struct ivch_priv *priv;
uint16_t temp;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
if (priv == NULL)
return false;
dvo->i2c_bus = i2cbus;
dvo->i2c_bus->slave_addr = dvo->slave_addr;
dvo->dev_priv = priv;
priv->quiet = true;
if (!ivch_read(dvo, VR00, &temp))
goto out;
priv->quiet = false;
/* Since the identification bits are probably zeroes, which doesn't seem
* very unique, check that the value in the base address field matches
* the address it's responding on.
*/
if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
DRM_DEBUG("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
(temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
goto out;
}
#if 0
if (!xf86I2CDevInit(&priv->d)) {
goto out;
}
#endif
ivch_read(dvo, VR20, &priv->width);
ivch_read(dvo, VR21, &priv->height);
return true;
out:
kfree(priv);
return false;
}
static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
{
return connector_status_connected;
}
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 112000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
{
int i;
uint16_t vr01, vr30, backlight;
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return;
if (mode == DRM_MODE_DPMS_ON)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
if (mode == DRM_MODE_DPMS_ON)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
ivch_write(dvo, VR01, vr01);
/* Wait for the panel to make its state transition */
for (i = 0; i < 100; i++) {
if (!ivch_read(dvo, VR30, &vr30))
break;
if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
break;
udelay(1000);
}
/* wait some more; vch may fail to resync sometimes without this */
udelay(16 * 1000);
}
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint16_t vr40 = 0;
uint16_t vr01;
vr01 = 0;
vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
VR40_HORIZONTAL_INTERP_ENABLE);
if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay) {
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
ivch_write (dvo, VR42, x_ratio);
ivch_write (dvo, VR41, y_ratio);
} else {
vr01 &= ~VR01_PANEL_FIT_ENABLE;
vr40 &= ~VR40_CLOCK_GATING_ENABLE;
}
vr40 &= ~VR40_AUTO_RATIO_ENABLE;
ivch_write(dvo, VR01, vr01);
ivch_write(dvo, VR40, vr40);
ivch_dump_regs(dvo);
}
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
uint16_t val;
ivch_read(dvo, VR00, &val);
DRM_DEBUG("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
DRM_DEBUG("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
DRM_DEBUG("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
DRM_DEBUG("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
DRM_DEBUG("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
DRM_DEBUG("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
DRM_DEBUG("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
DRM_DEBUG("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
DRM_DEBUG("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
DRM_DEBUG("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
DRM_DEBUG("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
DRM_DEBUG("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
DRM_DEBUG("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
DRM_DEBUG("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
DRM_DEBUG("VR8F: 0x%04x\n", val);
}
static void ivch_save(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
ivch_read(dvo, VR01, &priv->save_VR01);
ivch_read(dvo, VR40, &priv->save_VR40);
}
static void ivch_restore(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
ivch_write(dvo, VR01, priv->save_VR01);
ivch_write(dvo, VR40, priv->save_VR40);
}
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
if (priv) {
kfree(priv);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
.save = ivch_save,
.restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
.dump_regs = ivch_dump_regs,
.destroy = ivch_destroy,
};

302 linux-core/dvo_sil164.c Normal file

@ -0,0 +1,302 @@
/**************************************************************************
Copyright © 2006 Dave Airlie
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#define SIL164_VID 0x0001
#define SIL164_DID 0x0006
#define SIL164_VID_LO 0x00
#define SIL164_VID_HI 0x01
#define SIL164_DID_LO 0x02
#define SIL164_DID_HI 0x03
#define SIL164_REV 0x04
#define SIL164_RSVD 0x05
#define SIL164_FREQ_LO 0x06
#define SIL164_FREQ_HI 0x07
#define SIL164_REG8 0x08
#define SIL164_8_VEN (1<<5)
#define SIL164_8_HEN (1<<4)
#define SIL164_8_DSEL (1<<3)
#define SIL164_8_BSEL (1<<2)
#define SIL164_8_EDGE (1<<1)
#define SIL164_8_PD (1<<0)
#define SIL164_REG9 0x09
#define SIL164_9_VLOW (1<<7)
#define SIL164_9_MSEL_MASK (0x7<<4)
#define SIL164_9_TSEL (1<<3)
#define SIL164_9_RSEN (1<<2)
#define SIL164_9_HTPLG (1<<1)
#define SIL164_9_MDI (1<<0)
#define SIL164_REGC 0x0c
struct sil164_save_rec {
uint8_t reg8;
uint8_t reg9;
uint8_t regc;
};
struct sil164_priv {
//I2CDevRec d;
bool quiet;
struct sil164_save_rec save_regs;
struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = i2cbus->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!sil->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
return true;
if (!sil->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
/* Silicon Image 164 driver for chip on i2c bus */
static bool sil164_init(struct intel_dvo_device *dvo,
struct intel_i2c_chan *i2cbus)
{
/* this will detect the SIL164 chip on the specified i2c bus */
struct sil164_priv *sil;
unsigned char ch;
sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
if (sil == NULL)
return false;
dvo->i2c_bus = i2cbus;
dvo->i2c_bus->slave_addr = dvo->slave_addr;
dvo->dev_priv = sil;
sil->quiet = true;
if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
goto out;
if (ch != (SIL164_VID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
ch, i2cbus->adapter.name, i2cbus->slave_addr);
goto out;
}
if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
goto out;
if (ch != (SIL164_DID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
ch, i2cbus->adapter.name, i2cbus->slave_addr);
goto out;
}
sil->quiet = false;
DRM_DEBUG("init sil164 dvo controller successfully!\n");
return true;
out:
kfree(sil);
return false;
}
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
{
uint8_t reg9;
sil164_readb(dvo, SIL164_REG9, &reg9);
if (reg9 & SIL164_9_HTPLG)
return connector_status_connected;
else
return connector_status_disconnected;
}
static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static void sil164_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock
* dependencies in the mode setup, we can just leave the
* registers alone and everything will work fine.
*/
/* recommended programming sequence from doc */
/*sil164_writeb(sil, 0x08, 0x30);
sil164_writeb(sil, 0x09, 0x00);
sil164_writeb(sil, 0x0a, 0x90);
sil164_writeb(sil, 0x0c, 0x89);
sil164_writeb(sil, 0x08, 0x31);*/
/* don't do much */
return;
}
/* set the SIL164 power state */
static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
{
int ret;
unsigned char ch;
ret = sil164_readb(dvo, SIL164_REG8, &ch);
if (ret == false)
return;
if (mode == DRM_MODE_DPMS_ON)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
sil164_writeb(dvo, SIL164_REG8, ch);
return;
}
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_save(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil= dvo->dev_priv;
if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
return;
if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
return;
if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
return;
return;
}
static void sil164_restore(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
/* Restore it powered down initially */
sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
}
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
if (sil) {
kfree(sil);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops sil164_ops = {
.init = sil164_init,
.detect = sil164_detect,
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
.save = sil164_save,
.restore = sil164_restore,
.destroy = sil164_destroy,
};

335 linux-core/dvo_tfp410.c Normal file

@ -0,0 +1,335 @@
/*
* Copyright © 2007 Dave Mueller
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Dave Mueller <dave.mueller@gmx.ch>
*
*/
#include "dvo.h"
/* register definitions according to the TFP410 data sheet */
#define TFP410_VID 0x014C
#define TFP410_DID 0x0410
#define TFP410_VID_LO 0x00
#define TFP410_VID_HI 0x01
#define TFP410_DID_LO 0x02
#define TFP410_DID_HI 0x03
#define TFP410_REV 0x04
#define TFP410_CTL_1 0x08
#define TFP410_CTL_1_TDIS (1<<6)
#define TFP410_CTL_1_VEN (1<<5)
#define TFP410_CTL_1_HEN (1<<4)
#define TFP410_CTL_1_DSEL (1<<3)
#define TFP410_CTL_1_BSEL (1<<2)
#define TFP410_CTL_1_EDGE (1<<1)
#define TFP410_CTL_1_PD (1<<0)
#define TFP410_CTL_2 0x09
#define TFP410_CTL_2_VLOW (1<<7)
#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
#define TFP410_CTL_2_MSEL (1<<4)
#define TFP410_CTL_2_TSEL (1<<3)
#define TFP410_CTL_2_RSEN (1<<2)
#define TFP410_CTL_2_HTPLG (1<<1)
#define TFP410_CTL_2_MDI (1<<0)
#define TFP410_CTL_3 0x0A
#define TFP410_CTL_3_DK_MASK (0x7<<5)
#define TFP410_CTL_3_DK (1<<5)
#define TFP410_CTL_3_DKEN (1<<4)
#define TFP410_CTL_3_CTL_MASK (0x7<<1)
#define TFP410_CTL_3_CTL (1<<1)
#define TFP410_USERCFG 0x0B
#define TFP410_DE_DLY 0x32
#define TFP410_DE_CTL 0x33
#define TFP410_DE_CTL_DEGEN (1<<6)
#define TFP410_DE_CTL_VSPOL (1<<5)
#define TFP410_DE_CTL_HSPOL (1<<4)
#define TFP410_DE_CTL_DEDLY8 (1<<0)
#define TFP410_DE_TOP 0x34
#define TFP410_DE_CNT_LO 0x36
#define TFP410_DE_CNT_HI 0x37
#define TFP410_DE_LIN_LO 0x38
#define TFP410_DE_LIN_HI 0x39
#define TFP410_H_RES_LO 0x3A
#define TFP410_H_RES_HI 0x3B
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
struct tfp410_save_rec {
uint8_t ctl1;
uint8_t ctl2;
};
struct tfp410_priv {
bool quiet;
struct tfp410_save_rec saved_reg;
struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = i2cbus->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!tfp->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = i2cbus->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
return true;
if (!tfp->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, i2cbus->slave_addr);
}
return false;
}
static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
{
uint8_t ch1, ch2;
if (tfp410_readb(dvo, addr+0, &ch1) &&
tfp410_readb(dvo, addr+1, &ch2))
return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
return -1;
}
/* Ti TFP410 driver for chip on i2c bus */
static bool tfp410_init(struct intel_dvo_device *dvo,
struct intel_i2c_chan *i2cbus)
{
/* this will detect the tfp410 chip on the specified i2c bus */
struct tfp410_priv *tfp;
int id;
tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
if (tfp == NULL)
return false;
dvo->i2c_bus = i2cbus;
dvo->i2c_bus->slave_addr = dvo->slave_addr;
dvo->dev_priv = tfp;
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
id, i2cbus->adapter.name, i2cbus->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
id, i2cbus->adapter.name, i2cbus->slave_addr);
goto out;
}
tfp->quiet = false;
return true;
out:
kfree(tfp);
return false;
}
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
{
enum drm_connector_status ret = connector_status_disconnected;
uint8_t ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_HTPLG)
ret = connector_status_connected;
else
ret = connector_status_disconnected;
}
return ret;
}
static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock dependencies
* in the mode setup, we can just leave the registers alone and everything
* will work fine.
*/
/* don't do much */
return;
}
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
if (mode == DRM_MODE_DPMS_ON)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_save(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
return;
if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
return;
}
static void tfp410_restore(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
/* Restore it powered down initially */
tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
}
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
if (tfp) {
kfree(tfp);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops tfp410_ops = {
.init = tfp410_init,
.detect = tfp410_detect,
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
.save = tfp410_save,
.restore = tfp410_restore,
.destroy = tfp410_destroy,
};
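/*
 * Illustrative sketch, not part of the patch: how a hypothetical caller on
 * the intel_dvo side might drive tfp410_ops. Only operations defined above
 * are used; the surrounding probe path and names here are assumed.
 */
#if 0
static bool example_probe_tfp410(struct intel_dvo_device *dvo,
				 struct intel_i2c_chan *i2cbus)
{
	/* tfp410_init() checks the vendor/device ID over i2c. */
	if (!tfp410_ops.init(dvo, i2cbus))
		return false;

	/* tfp410_detect() reports the HTPLG (hot plug) bit from CTL_2. */
	if (tfp410_ops.detect(dvo) != connector_status_connected)
		return false;

	/* Power up: the CTL_1 PD bit is set for normal operation. */
	tfp410_ops.dpms(dvo, DRM_MODE_DPMS_ON);
	return true;
}
#endif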


@@ -1,303 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
{
return drm_agp_init_ttm(dev);
}
int i915_fence_type(struct drm_buffer_object *bo,
uint32_t *fclass,
uint32_t *type)
{
if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
*/
uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
if (flags & DRM_BO_FLAG_READ)
flush_cmd |= MI_READ_FLUSH;
if (flags & DRM_BO_FLAG_EXE)
flush_cmd |= MI_EXE_FLUSH;
return i915_emit_mi_flush(dev, flush_cmd);
}
int i915_init_mem_type(struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;
man->drm_bus_maptype = 0;
man->gpu_offset = 0;
break;
case DRM_BO_MEM_TT:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
man->gpu_offset = 0;
break;
case DRM_BO_MEM_PRIV0:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
man->gpu_offset = 0;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
/*
* i915_evict_flags:
*
* @bo: the buffer object to be evicted
*
* Return the bo flags for a buffer which is not mapped to the hardware.
* These will be placed in proposed_flags so that when the move is
* finished, they'll end up in bo->mem.flags
*/
uint64_t i915_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return DRM_BO_FLAG_MEM_LOCAL;
default:
return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
}
}
#if 0 /* See comment below */
static void i915_emit_copy_blit(struct drm_device * dev,
uint32_t src_offset,
uint32_t dst_offset,
uint32_t pages, int direction)
{
uint32_t cur_pages;
uint32_t stride = PAGE_SIZE;
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv)
return;
i915_kernel_lost_context(dev);
while (pages > 0) {
cur_pages = pages;
if (cur_pages > 2048)
cur_pages = 2048;
pages -= cur_pages;
BEGIN_LP_RING(6);
OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
(1 << 25) | (direction ? (1 << 30) : 0));
OUT_RING((cur_pages << 16) | PAGE_SIZE);
OUT_RING(dst_offset);
OUT_RING(stride & 0xffff);
OUT_RING(src_offset);
ADVANCE_LP_RING();
}
return;
}
static int i915_move_blit(struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
int dir = 0;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = 1;
}
i915_emit_copy_blit(bo->dev,
old_mem->mm_node->start << PAGE_SHIFT,
new_mem->mm_node->start << PAGE_SHIFT,
new_mem->num_pages, dir);
i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
DRM_FENCE_TYPE_EXE |
DRM_I915_FENCE_TYPE_RW,
DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
}
/*
* Flip destination ttm into cached-coherent AGP,
* then blit and subsequently move out again.
*/
static int i915_move_flip(struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_bo_mem_reg tmp_mem;
int ret;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
if (ret)
return ret;
ret = drm_bind_ttm(bo->ttm, &tmp_mem);
if (ret)
goto out_cleanup;
ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
if (ret)
goto out_cleanup;
ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
return ret;
}
#endif
/*
* Disable i915_move_flip for now, since we can't guarantee that the hardware
* lock is held here. To re-enable we need to make sure either
* a) The X server is using DRM to submit commands to the ring, or
* b) DRM can use the HP ring for these blits. This means i915 needs to
* implement a new ring submission mechanism and fence class.
*/
int i915_move(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
return 0;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
#endif
static inline void drm_cache_flush_addr(void *virt)
{
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(virt+i);
}
static inline void drm_cache_flush_page(struct page *p)
{
drm_cache_flush_addr(page_address(p));
}
void i915_flush_ttm(struct drm_ttm *ttm)
{
int i;
if (!ttm)
return;
DRM_MEMORYBARRIER();
#ifdef CONFIG_X86_32
/* Hopefully nobody has built an x86-64 processor without clflush */
if (!cpu_has_clflush) {
wbinvd();
DRM_MEMORYBARRIER();
return;
}
#endif
for (i = ttm->num_pages - 1; i >= 0; i--)
drm_cache_flush_page(drm_ttm_get_page(ttm, i));
DRM_MEMORYBARRIER();
}


@@ -30,6 +30,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "intel_drv.h"
#include "i915_drv.h"
#include "drm_pciids.h"
@@ -38,30 +39,14 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
#ifdef I915_HAVE_FENCE
extern struct drm_fence_driver i915_fence_driver;
#endif
unsigned int i915_modeset = 0;
module_param_named(modeset, i915_modeset, int, 0400);
#ifdef I915_HAVE_BUFFER
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
static struct drm_bo_driver i915_bo_driver = {
.mem_type_prio = i915_mem_prios,
.mem_busy_prio = i915_busy_prios,
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
.fence_type = i915_fence_type,
.invalidate_caches = i915_invalidate_caches,
.init_mem_type = i915_init_mem_type,
.evict_flags = i915_evict_flags,
.move = i915_move,
.ttm_cache_flush = i915_flush_ttm,
.command_stream_barrier = NULL,
};
#endif
unsigned int i915_rightof = 1;
module_param_named(i915_rightof, i915_rightof, int, 0400);
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
@@ -140,6 +125,8 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
.proc_init = i915_gem_proc_init,
.proc_cleanup = i915_gem_proc_cleanup,
.ioctls = i915_ioctls,
@@ -163,12 +150,6 @@ static struct drm_driver driver = {
.probe = probe,
.remove = remove,
},
#ifdef I915_HAVE_FENCE
.fence_driver = &i915_fence_driver,
#endif
#ifdef I915_HAVE_BUFFER
.bo_driver = &i915_bo_driver,
#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -198,14 +179,17 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static void remove(struct pci_dev *pdev)
{
drm_cleanup_pci(pdev);
if (pdev->msi_enabled)
pci_disable_msi(pdev);
drm_cleanup_pci(pdev);
}
static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
if (i915_modeset == 1)
driver.driver_features |= DRIVER_MODESET;
return drm_init(&driver, pciidlist);
}
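/*
 * Usage note (not in the patch): with the hunk above, kernel modesetting is
 * opt-in at module load time via the "modeset" parameter declared earlier in
 * this file, e.g.:
 *
 *	modprobe i915 modeset=1
 */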


@@ -1,917 +0,0 @@
/*
* Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
* Keith Packard
* ... ?
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if DRM_DEBUG_CODE
#define DRM_DEBUG_RELOCATION (drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION 0
#endif
enum i915_buf_idle {
I915_RELOC_UNCHECKED,
I915_RELOC_IDLE,
I915_RELOC_BUSY
};
struct i915_relocatee_info {
struct drm_buffer_object *buf;
unsigned long offset;
uint32_t *data_page;
unsigned page_offset;
struct drm_bo_kmap_obj kmap;
int is_iomem;
int dst;
int idle;
int performed_ring_relocs;
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
unsigned long pfn;
pgprot_t pg_prot;
#endif
};
struct drm_i915_validate_buffer {
struct drm_buffer_object *buffer;
int presumed_offset_correct;
void __user *data;
int ret;
enum i915_buf_idle idle;
};
/*
* I'd like to use MI_STORE_DATA_IMM here, but I can't make
* it work. Seems like GART writes are broken with that
* instruction. Also I'm not sure that MI_FLUSH will
* act as a memory barrier for that instruction. It will
* for this single dword 2D blit.
*/
static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
uint32_t value)
{
struct drm_i915_private *dev_priv =
(struct drm_i915_private *)dev->dev_private;
RING_LOCALS;
i915_kernel_lost_context(dev);
BEGIN_LP_RING(6);
OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
OUT_RING((0x1 << 16) | (0x4));
OUT_RING(offset);
OUT_RING(value);
OUT_RING(0);
ADVANCE_LP_RING();
}
static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
*buffers, unsigned num_buffers)
{
while (num_buffers--)
drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
}
int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
struct drm_i915_validate_buffer *buffers,
struct i915_relocatee_info *relocatee, uint32_t * reloc)
{
unsigned index;
unsigned long new_cmd_offset;
u32 val;
int ret, i;
int buf_index = -1;
/*
* FIXME: O(relocs * buffers) complexity.
*/
for (i = 0; i <= num_buffers; i++)
if (buffers[i].buffer)
if (reloc[2] == buffers[i].buffer->base.hash.key)
buf_index = i;
if (buf_index == -1) {
DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
return -EINVAL;
}
/*
* Short-circuit relocations that were correctly
* guessed by the client
*/
if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
return 0;
new_cmd_offset = reloc[0];
if (!relocatee->data_page ||
!drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
drm_bo_kunmap(&relocatee->kmap);
relocatee->data_page = NULL;
relocatee->offset = new_cmd_offset;
if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
if (ret)
return ret;
relocatee->idle = I915_RELOC_IDLE;
}
if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
(mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
drm_bo_evict_cached(relocatee->buf);
ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
1, &relocatee->kmap);
if (ret) {
DRM_ERROR
("Could not map command buffer to apply relocs: %08lx\n",
new_cmd_offset);
return ret;
}
relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
&relocatee->is_iomem);
relocatee->page_offset = (relocatee->offset & PAGE_MASK);
}
val = buffers[buf_index].buffer->offset;
index = (reloc[0] - relocatee->page_offset) >> 2;
/* add in validate */
val = val + reloc[1];
if (DRM_DEBUG_RELOCATION) {
if (buffers[buf_index].presumed_offset_correct &&
relocatee->data_page[index] != val) {
DRM_DEBUG
("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
reloc[0], reloc[1], buf_index,
relocatee->data_page[index], val);
}
}
if (relocatee->is_iomem)
iowrite32(val, relocatee->data_page + index);
else
relocatee->data_page[index] = val;
return 0;
}
int i915_process_relocs(struct drm_file *file_priv,
uint32_t buf_handle,
uint32_t __user ** reloc_user_ptr,
struct i915_relocatee_info *relocatee,
struct drm_i915_validate_buffer *buffers,
uint32_t num_buffers)
{
int ret, reloc_stride;
uint32_t cur_offset;
uint32_t reloc_count;
uint32_t reloc_type;
uint32_t reloc_buf_size;
uint32_t *reloc_buf = NULL;
int i;
	/* copy the relocation list header in from the user pointer */
ret = get_user(reloc_count, *reloc_user_ptr);
if (ret) {
DRM_ERROR("Could not map relocation buffer.\n");
goto out;
}
ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
if (ret) {
DRM_ERROR("Could not map relocation buffer.\n");
goto out;
}
if (reloc_type != 0) {
DRM_ERROR("Unsupported relocation type requested\n");
ret = -EINVAL;
goto out;
}
reloc_buf_size =
(I915_RELOC_HEADER +
(reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
if (!reloc_buf) {
DRM_ERROR("Out of memory for reloc buffer\n");
ret = -ENOMEM;
goto out;
}
if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
ret = -EFAULT;
goto out;
}
/* get next relocate buffer handle */
*reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
*reloc_user_ptr);
for (i = 0; i < reloc_count; i++) {
cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
ret = i915_apply_reloc(file_priv, num_buffers, buffers,
relocatee, reloc_buf + cur_offset);
if (ret)
goto out;
}
out:
if (reloc_buf)
kfree(reloc_buf);
if (relocatee->data_page) {
drm_bo_kunmap(&relocatee->kmap);
relocatee->data_page = NULL;
}
return ret;
}
static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
uint32_t __user * reloc_user_ptr,
struct drm_i915_validate_buffer *buffers,
uint32_t buf_count)
{
struct drm_device *dev = file_priv->minor->dev;
struct i915_relocatee_info relocatee;
int ret = 0;
int b;
/*
* Short circuit relocations when all previous
* buffers offsets were correctly guessed by
* the client
*/
if (!DRM_DEBUG_RELOCATION) {
for (b = 0; b < buf_count; b++)
if (!buffers[b].presumed_offset_correct)
break;
if (b == buf_count)
return 0;
}
memset(&relocatee, 0, sizeof(relocatee));
relocatee.idle = I915_RELOC_UNCHECKED;
mutex_lock(&dev->struct_mutex);
relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!relocatee.buf) {
DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
ret = -EINVAL;
goto out_err;
}
mutex_lock(&relocatee.buf->mutex);
while (reloc_user_ptr) {
ret =
i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
&relocatee, buffers, buf_count);
if (ret) {
DRM_ERROR("process relocs failed\n");
goto out_err1;
}
}
out_err1:
mutex_unlock(&relocatee.buf->mutex);
drm_bo_usage_deref_unlocked(&relocatee.buf);
out_err:
return ret;
}
static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
{
if (relocatee->data_page) {
#ifndef DRM_KMAP_ATOMIC_PROT_PFN
drm_bo_kunmap(&relocatee->kmap);
#else
kunmap_atomic(relocatee->data_page, KM_USER0);
#endif
relocatee->data_page = NULL;
}
relocatee->buf = NULL;
relocatee->dst = ~0;
}
static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
struct drm_i915_validate_buffer *buffers,
unsigned int dst, unsigned long dst_offset)
{
int ret;
if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
i915_clear_relocatee(relocatee);
relocatee->dst = dst;
relocatee->buf = buffers[dst].buffer;
relocatee->idle = buffers[dst].idle;
/*
* Check for buffer idle. If the buffer is busy, revert to
* ring relocations.
*/
if (relocatee->idle == I915_RELOC_UNCHECKED) {
preempt_enable();
mutex_lock(&relocatee->buf->mutex);
ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
if (ret == 0)
relocatee->idle = I915_RELOC_IDLE;
else {
relocatee->idle = I915_RELOC_BUSY;
relocatee->performed_ring_relocs = 1;
}
mutex_unlock(&relocatee->buf->mutex);
preempt_disable();
buffers[dst].idle = relocatee->idle;
}
}
if (relocatee->idle == I915_RELOC_BUSY)
return 0;
if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
DRM_ERROR("Relocation destination out of bounds.\n");
return -EINVAL;
}
if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
NULL == relocatee->data_page)) {
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
if (NULL != relocatee->data_page) {
kunmap_atomic(relocatee->data_page, KM_USER0);
relocatee->data_page = NULL;
}
ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
&relocatee->pfn, &relocatee->pg_prot);
if (ret) {
DRM_ERROR("Can't map relocation destination.\n");
return -EINVAL;
}
relocatee->data_page =
kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
relocatee->pg_prot);
#else
if (NULL != relocatee->data_page) {
drm_bo_kunmap(&relocatee->kmap);
relocatee->data_page = NULL;
}
ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
1, &relocatee->kmap);
if (ret) {
DRM_ERROR("Can't map relocation destination.\n");
return ret;
}
relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
&relocatee->is_iomem);
#endif
relocatee->page_offset = dst_offset & PAGE_MASK;
}
return 0;
}
static int i915_apply_post_reloc(uint32_t reloc[],
struct drm_i915_validate_buffer *buffers,
uint32_t num_buffers,
struct i915_relocatee_info *relocatee)
{
uint32_t reloc_buffer = reloc[2];
uint32_t dst_buffer = reloc[3];
uint32_t val;
uint32_t index;
int ret;
if (likely(buffers[reloc_buffer].presumed_offset_correct))
return 0;
if (unlikely(reloc_buffer >= num_buffers)) {
DRM_ERROR("Invalid reloc buffer index.\n");
return -EINVAL;
}
if (unlikely(dst_buffer >= num_buffers)) {
DRM_ERROR("Invalid dest buffer index.\n");
return -EINVAL;
}
ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
if (unlikely(ret))
return ret;
val = buffers[reloc_buffer].buffer->offset;
index = (reloc[0] - relocatee->page_offset) >> 2;
val = val + reloc[1];
if (relocatee->idle == I915_RELOC_BUSY) {
i915_emit_ring_reloc(relocatee->buf->dev,
relocatee->buf->offset + reloc[0], val);
return 0;
}
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
relocatee->data_page[index] = val;
#else
if (likely(relocatee->is_iomem))
iowrite32(val, relocatee->data_page + index);
else
relocatee->data_page[index] = val;
#endif
return 0;
}
static int i915_post_relocs(struct drm_file *file_priv,
uint32_t __user * new_reloc_ptr,
struct drm_i915_validate_buffer *buffers,
unsigned int num_buffers)
{
uint32_t *reloc;
uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
struct i915_relocatee_info relocatee;
uint32_t reloc_type;
uint32_t num_relocs;
uint32_t count;
int ret = 0;
int i;
int short_circuit = 1;
uint32_t __user *reloc_ptr;
uint64_t new_reloc_data;
uint32_t reloc_buf_size;
uint32_t *reloc_buf;
for (i = 0; i < num_buffers; ++i) {
if (unlikely(!buffers[i].presumed_offset_correct)) {
short_circuit = 0;
break;
}
}
if (likely(short_circuit))
return 0;
memset(&relocatee, 0, sizeof(relocatee));
while (new_reloc_ptr) {
reloc_ptr = new_reloc_ptr;
ret = get_user(num_relocs, reloc_ptr);
if (unlikely(ret))
goto out;
if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
header_size +
num_relocs * reloc_stride)))
return -EFAULT;
ret = __get_user(reloc_type, reloc_ptr + 1);
if (unlikely(ret))
goto out;
if (unlikely(reloc_type != 1)) {
DRM_ERROR("Unsupported relocation type requested.\n");
ret = -EINVAL;
goto out;
}
ret = __get_user(new_reloc_data, reloc_ptr + 2);
new_reloc_ptr = (uint32_t __user *) (unsigned long)
new_reloc_data;
reloc_ptr += I915_RELOC_HEADER;
if (num_relocs == 0)
goto out;
reloc_buf_size =
(num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
if (!reloc_buf) {
DRM_ERROR("Out of memory for reloc buffer\n");
ret = -ENOMEM;
goto out;
}
if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
ret = -EFAULT;
goto out;
}
reloc = reloc_buf;
preempt_disable();
for (count = 0; count < num_relocs; ++count) {
ret = i915_apply_post_reloc(reloc, buffers,
num_buffers, &relocatee);
if (unlikely(ret)) {
preempt_enable();
goto out;
}
reloc += I915_RELOC0_STRIDE;
}
preempt_enable();
if (reloc_buf) {
kfree(reloc_buf);
reloc_buf = NULL;
}
i915_clear_relocatee(&relocatee);
}
out:
/*
* Flush ring relocs so the command parser will pick them up.
*/
if (relocatee.performed_ring_relocs)
(void)i915_emit_mi_flush(file_priv->minor->dev, 0);
i915_clear_relocatee(&relocatee);
if (reloc_buf) {
kfree(reloc_buf);
reloc_buf = NULL;
}
return ret;
}
static int i915_check_presumed(struct drm_i915_op_arg *arg,
struct drm_buffer_object *bo,
uint32_t __user * data, int *presumed_ok)
{
struct drm_bo_op_req *req = &arg->d.req;
uint32_t hint_offset;
uint32_t hint = req->bo_req.hint;
*presumed_ok = 0;
if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
return 0;
if (bo->offset == req->bo_req.presumed_offset) {
*presumed_ok = 1;
return 0;
}
/*
* We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
* the user-space IOCTL argument list, since the buffer has moved,
* we're about to apply relocations and we might subsequently
* hit an -EAGAIN. In that case the argument list will be reused by
* user-space, but the presumed offset is no longer valid.
*
* Needless to say, this is a bit ugly.
*/
hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
return __put_user(hint, data + hint_offset);
}
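/*
 * Illustrative sketch (assumption, not from the patch): the user-space half
 * of the presumed-offset handshake checked by i915_check_presumed() could
 * fill its request roughly like this, reusing the offset from the previous
 * submission as a guess. The helper name is hypothetical.
 */
#if 0
static void example_fill_bo_req(struct drm_i915_op_arg *arg,
				uint32_t handle, uint64_t last_offset)
{
	arg->d.req.op = drm_bo_validate;
	arg->d.req.bo_req.handle = handle;
	/* If the buffer has moved, the kernel clears this hint in the
	 * copied-back argument so stale offsets are not reused. */
	arg->d.req.bo_req.presumed_offset = last_offset;
	arg->d.req.bo_req.hint |= DRM_BO_HINT_PRESUMED_OFFSET;
}
#endif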
/*
* Validate, add fence and relocate a block of bos from a userspace list
*/
int i915_validate_buffer_list(struct drm_file *file_priv,
unsigned int fence_class, uint64_t data,
struct drm_i915_validate_buffer *buffers,
uint32_t * num_buffers,
uint32_t __user ** post_relocs)
{
struct drm_i915_op_arg arg;
struct drm_bo_op_req *req = &arg.d.req;
int ret = 0;
unsigned buf_count = 0;
uint32_t buf_handle;
uint32_t __user *reloc_user_ptr;
struct drm_i915_validate_buffer *item = buffers;
*post_relocs = NULL;
do {
if (buf_count >= *num_buffers) {
			DRM_ERROR("Buffer count exceeded %d.\n", *num_buffers);
ret = -EINVAL;
goto out_err;
}
item = buffers + buf_count;
item->buffer = NULL;
item->presumed_offset_correct = 0;
item->idle = I915_RELOC_UNCHECKED;
if (copy_from_user
(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
ret = -EFAULT;
goto out_err;
}
ret = 0;
if (req->op != drm_bo_validate) {
DRM_ERROR
("Buffer object operation wasn't \"validate\".\n");
ret = -EINVAL;
goto out_err;
}
item->ret = 0;
item->data = (void __user *)(unsigned long)data;
buf_handle = req->bo_req.handle;
reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
/*
* Switch mode to post-validation relocations?
*/
if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
(reloc_user_ptr != NULL))) {
uint32_t reloc_type;
ret = get_user(reloc_type, reloc_user_ptr + 1);
if (ret)
goto out_err;
if (reloc_type == 1)
*post_relocs = reloc_user_ptr;
}
if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
ret =
i915_exec_reloc(file_priv, buf_handle,
reloc_user_ptr, buffers, buf_count);
if (ret)
goto out_err;
DRM_MEMORYBARRIER();
}
ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
req->bo_req.flags,
req->bo_req.mask, req->bo_req.hint,
req->bo_req.fence_class,
NULL, &item->buffer);
if (ret) {
DRM_ERROR("error on handle validate %d\n", ret);
goto out_err;
}
buf_count++;
ret = i915_check_presumed(&arg, item->buffer,
(uint32_t __user *)
(unsigned long)data,
&item->presumed_offset_correct);
if (ret)
goto out_err;
data = arg.next;
} while (data != 0);
out_err:
*num_buffers = buf_count;
item->ret = (ret != -EAGAIN) ? ret : 0;
return ret;
}
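/*
 * Illustrative sketch (assumption): the ops_list walked above is a chain of
 * drm_i915_op_arg entries living in user space, linked through .next (0
 * terminates) with an optional per-buffer relocation list in .reloc_ptr.
 * The helper name is hypothetical.
 */
#if 0
static uint64_t example_chain_ops(struct drm_i915_op_arg *args, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		args[i].d.req.op = drm_bo_validate;
		args[i].next = (i + 1 < count) ?
			(uint64_t)(unsigned long)&args[i + 1] : 0;
	}
	/* The head pointer becomes exec_buf->ops_list. */
	return (uint64_t)(unsigned long)&args[0];
}
#endif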
/*
* Remove all buffers from the unfenced list.
* If the execbuffer operation was aborted, for example due to a signal,
* this also makes sure that buffers retain their original state and
* fence pointers.
* Copy back buffer information to user-space unless we were interrupted
* by a signal, in which case the IOCTL must be rerun.
*/
static int i915_handle_copyback(struct drm_device *dev,
struct drm_i915_validate_buffer *buffers,
unsigned int num_buffers, int ret)
{
int err = ret;
int i;
struct drm_i915_op_arg arg;
struct drm_buffer_object *bo;
if (ret)
drm_putback_buffer_objects(dev);
if (ret != -EAGAIN) {
for (i = 0; i < num_buffers; ++i) {
arg.handled = 1;
arg.d.rep.ret = buffers->ret;
bo = buffers->buffer;
mutex_lock(&bo->mutex);
drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
mutex_unlock(&bo->mutex);
if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
err = -EFAULT;
buffers++;
}
}
return err;
}
/*
* Create a fence object, and if that fails, pretend that everything is
* OK and just idle the GPU.
*/
void i915_fence_or_sync(struct drm_file *file_priv,
uint32_t fence_flags,
struct drm_fence_arg *fence_arg,
struct drm_fence_object **fence_p)
{
struct drm_device *dev = file_priv->minor->dev;
int ret;
struct drm_fence_object *fence;
ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
if (ret) {
/*
* Fence creation failed.
* Fall back to synchronous operation and idle the engine.
*/
(void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
(void)i915_quiescent(dev);
if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
/*
* Communicate to user-space that
* fence creation has failed and that
* the engine is idle.
*/
fence_arg->handle = ~0;
fence_arg->error = ret;
}
drm_putback_buffer_objects(dev);
if (fence_p)
*fence_p = NULL;
return;
}
if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
ret = drm_fence_add_user_object(file_priv, fence,
fence_flags &
DRM_FENCE_FLAG_SHAREABLE);
if (!ret)
drm_fence_fill_arg(fence, fence_arg);
else {
/*
* Fence user object creation failed.
* We must idle the engine here as well, as user-
* space expects a fence object to wait on. Since we
* have a fence object, we wait for it to signal,
* which indicates the engine is "sufficiently" idle.
*/
(void)drm_fence_object_wait(fence, 0, 1, fence->type);
drm_fence_usage_deref_unlocked(&fence);
fence_arg->handle = ~0;
fence_arg->error = ret;
}
}
if (fence_p)
*fence_p = fence;
else if (fence)
drm_fence_usage_deref_unlocked(&fence);
}
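#if 0
	/* Hypothetical user-space check (assumption) for the fallback path
	 * above: a handle of ~0 means no fence was created, the engine was
	 * idled synchronously, and fence_arg.error holds the failure code. */
	if (fence_arg.handle == ~0U)
		fprintf(stderr, "fence creation failed: %d, engine idled\n",
			fence_arg.error);
#endif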
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
struct drm_i915_execbuffer *exec_buf = data;
struct drm_i915_batchbuffer *batch = &exec_buf->batch;
struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
int num_buffers;
int ret;
uint32_t __user *post_relocs;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct
drm_clip_rect)))
return -EFAULT;
if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
return -EINVAL;
ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (ret)
return ret;
/*
* The cmdbuf_mutex makes sure the validate-submit-fence
* operation is atomic.
*/
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (ret) {
drm_bo_read_unlock(&dev->bm.bm_lock);
return -EAGAIN;
}
num_buffers = exec_buf->num_buffers;
if (!dev_priv->val_bufs) {
dev_priv->val_bufs =
vmalloc(sizeof(struct drm_i915_validate_buffer) *
dev_priv->max_validate_buffers);
}
if (!dev_priv->val_bufs) {
drm_bo_read_unlock(&dev->bm.bm_lock);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return -ENOMEM;
}
/* validate buffer list + fixup relocations */
ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
dev_priv->val_bufs, &num_buffers,
&post_relocs);
if (ret)
goto out_err0;
if (post_relocs) {
ret = i915_post_relocs(file_priv, post_relocs,
dev_priv->val_bufs, num_buffers);
if (ret)
goto out_err0;
}
/* make sure all previous memory operations have passed */
DRM_MEMORYBARRIER();
if (!post_relocs) {
drm_agp_chipset_flush(dev);
batch->start =
dev_priv->val_bufs[num_buffers - 1].buffer->offset;
} else {
batch->start += dev_priv->val_bufs[0].buffer->offset;
}
DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
ret = i915_dispatch_batchbuffer(dev, batch);
if (ret)
goto out_err0;
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
out_err0:
ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
mutex_lock(&dev->struct_mutex);
i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
}


@@ -1,273 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/*
* Initiate a sync flush if it's not already pending.
*/
static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
struct drm_fence_class_manager *fc)
{
if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
!dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
}
static inline void i915_report_rwflush(struct drm_device *dev,
struct drm_i915_private *dev_priv)
{
if (unlikely(dev_priv->flush_pending)) {
uint32_t flush_flags;
uint32_t i_status;
uint32_t flush_sequence;
i_status = READ_HWSP(dev_priv, 0);
if ((i_status & (1 << 12)) !=
(dev_priv->saved_flush_status & (1 << 12))) {
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, 0, flush_sequence,
flush_flags, 0);
}
}
}
static void i915_fence_flush(struct drm_device *dev,
uint32_t fence_class)
{
struct drm_i915_private *dev_priv =
(struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
unsigned long irq_flags;
if (unlikely(!dev_priv))
return;
write_lock_irqsave(&fm->lock, irq_flags);
i915_initiate_rwflush(dev_priv, fc);
write_unlock_irqrestore(&fm->lock, irq_flags);
}
static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
uint32_t waiting_types)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
uint32_t sequence;
if (unlikely(!dev_priv))
return;
/*
* First, report any executed sync flush:
*/
i915_report_rwflush(dev, dev_priv);
/*
* Report a new breadcrumb, and adjust IRQs.
*/
if (waiting_types & DRM_FENCE_TYPE_EXE) {
sequence = READ_BREADCRUMB(dev_priv);
drm_fence_handler(dev, 0, sequence,
DRM_FENCE_TYPE_EXE, 0);
if (dev_priv->fence_irq_on &&
!(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
} else if (!dev_priv->fence_irq_on &&
(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
}
/*
* There may be new RW flushes pending. Start them.
*/
i915_initiate_rwflush(dev_priv, fc);
/*
* And possibly, but unlikely, they finish immediately.
*/
i915_report_rwflush(dev, dev_priv);
}
static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
uint32_t flags, uint32_t *sequence,
uint32_t *native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (unlikely(!dev_priv))
return -EINVAL;
i915_emit_irq(dev);
*sequence = (uint32_t) dev_priv->counter;
*native_type = DRM_FENCE_TYPE_EXE;
if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
*native_type |= DRM_I915_FENCE_TYPE_RW;
return 0;
}
void i915_fence_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
write_lock(&fm->lock);
if (likely(dev_priv->fence_irq_on))
i915_fence_poll(dev, 0, fc->waiting_types);
write_unlock(&fm->lock);
}
/*
* We need a separate wait function since we need to poll for
* sync flushes.
*/
static int i915_fence_wait(struct drm_fence_object *fence,
int lazy, int interruptible, uint32_t mask)
{
struct drm_device *dev = fence->dev;
drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
int ret;
unsigned long _end = jiffies + 3 * DRM_HZ;
drm_fence_object_flush(fence, mask);
if (likely(interruptible))
ret = wait_event_interruptible_timeout
(fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
3 * DRM_HZ);
else
ret = wait_event_timeout
(fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
3 * DRM_HZ);
if (unlikely(ret == -ERESTARTSYS))
return -EAGAIN;
if (unlikely(ret == 0))
return -EBUSY;
if (likely(mask == DRM_FENCE_TYPE_EXE ||
drm_fence_object_signaled(fence, mask)))
return 0;
/*
* Remove this code snippet when fixed. HWSTAM doesn't let
* flush info through...
*/
if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
unsigned long irq_flags;
DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
msleep(100);
dev_priv->flush_pending = 0;
write_lock_irqsave(&fm->lock, irq_flags);
drm_fence_handler(dev, fence->fence_class,
fence->sequence, fence->type, 0);
write_unlock_irqrestore(&fm->lock, irq_flags);
}
/*
* Poll for sync flush completion.
*/
return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
}
static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
{
uint32_t flush_flags = fence->waiting_types &
~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
if (likely(flush_flags == 0 ||
((flush_flags & ~fence->native_types) == 0) ||
(fence->signaled_types != DRM_FENCE_TYPE_EXE)))
return 0;
else {
struct drm_device *dev = fence->dev;
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_driver *driver = dev->driver->fence_driver;
if (unlikely(!dev_priv))
return 0;
if (dev_priv->flush_pending) {
uint32_t diff = (dev_priv->flush_sequence - fence->sequence) &
driver->sequence_mask;
if (diff < driver->wrap_diff)
return 0;
}
}
return flush_flags;
}
struct drm_fence_driver i915_fence_driver = {
.num_classes = 1,
.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
.sequence_mask = BREADCRUMB_MASK,
.has_irq = NULL,
.emit = i915_fence_emit_sequence,
.flush = i915_fence_flush,
.poll = i915_fence_poll,
.needed_flush = i915_fence_needed_flush,
.wait = i915_fence_wait,
};


@@ -32,10 +32,6 @@
#include "i915_drv.h"
#include <linux/swap.h>
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
uint64_t offset,
@@ -51,30 +47,37 @@ static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (start >= end ||
(start & (PAGE_SIZE - 1)) != 0 ||
(end & (PAGE_SIZE - 1)) != 0) {
return -EINVAL;
}
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
dev->gtt_total = (uint32_t) (end - start);
return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
int ret;
mutex_lock(&dev->struct_mutex);
if (args->gtt_start >= args->gtt_end ||
(args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
(args->gtt_end & (PAGE_SIZE - 1)) != 0) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
args->gtt_end - args->gtt_start);
dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
return 0;
return ret;
}
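/*
 * Illustrative note (assumption): i915_gem_do_init() rejects empty or
 * non-page-aligned ranges, so an in-kernel caller (e.g. the modesetting
 * path) would hand it a page-aligned slice of the aperture:
 */
#if 0
	/* Hypothetical: manage the GTT from 1 MB to 256 MB. */
	ret = i915_gem_do_init(dev, 1 * 1024 * 1024, 256 * 1024 * 1024);
#endif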
@@ -501,7 +504,7 @@ static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
/* Add a reference if we're newly entering the active list. */
@@ -519,7 +522,7 @@ static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -546,7 +549,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
uint32_t seqno;
int was_empty;
@@ -594,7 +597,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
uint32_t
i915_retire_commands(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
uint32_t flush_domains = 0;
RING_LOCALS;
@@ -617,7 +620,7 @@ static void
i915_gem_retire_request(struct drm_device *dev,
struct drm_i915_gem_request *request)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
if (request->flush_domains != 0) {
struct drm_i915_gem_object *obj_priv, *next;
@@ -688,7 +691,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
@@ -699,7 +702,7 @@ i915_get_gem_seqno(struct drm_device *dev)
void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t seqno;
seqno = i915_get_gem_seqno(dev);
@@ -727,10 +730,10 @@ i915_gem_retire_requests(struct drm_device *dev)
void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_i915_private *dev_priv;
struct drm_device *dev;
dev_priv = container_of(work, drm_i915_private_t,
dev_priv = container_of(work, struct drm_i915_private,
mm.retire_work.work);
dev = dev_priv->dev;
@@ -748,19 +751,19 @@ i915_gem_retire_work_handler(struct work_struct *work)
int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
BUG_ON(seqno == 0);
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_on(dev_priv);
i915_user_irq_on(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
seqno) ||
dev_priv->mm.wedged);
i915_user_irq_off(dev_priv);
i915_user_irq_off(dev);
dev_priv->mm.waiting_gem_seqno = 0;
}
if (dev_priv->mm.wedged)
@@ -786,7 +789,7 @@ i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
RING_LOCALS;
@@ -972,7 +975,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
static int
i915_gem_evict_something(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret = 0;
@@ -1102,7 +1105,7 @@ static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
int page_count, ret;
@@ -1311,7 +1314,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static int
int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain)
@@ -1675,7 +1678,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
struct drm_i915_gem_execbuffer *exec,
uint64_t exec_offset)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
(uintptr_t) exec->cliprects_ptr;
int nbox = exec->num_cliprects;
@@ -1758,7 +1761,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2000,7 +2003,7 @@ void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2217,7 +2220,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
static int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t seqno, cur_seqno, last_seqno;
int stuck;
@@ -2289,7 +2292,7 @@ i915_gem_idle(struct drm_device *dev)
static int
i915_gem_init_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
@@ -2336,10 +2339,10 @@ i915_gem_init_hws(struct drm_device *dev)
return 0;
}
static int
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
@@ -2400,10 +2403,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
return 0;
}
static void
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->ring.ring_obj == NULL)
return;
@@ -2430,9 +2433,12 @@ int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (dev_priv->mm.wedged) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
dev_priv->mm.wedged = 0;
@@ -2458,6 +2464,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
{
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
if (ret == 0)
@@ -2471,7 +2480,7 @@ void
i915_gem_lastclose(struct drm_device *dev)
{
int ret;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
@@ -2488,7 +2497,7 @@ i915_gem_lastclose(struct drm_device *dev)
void i915_gem_load(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);


@@ -35,7 +35,7 @@
void
i915_verify_inactive(struct drm_device *dev, char *file, int line)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
@@ -102,7 +102,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
void
i915_dump_lru(struct drm_device *dev, const char *where)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
DRM_INFO("active list %s {\n", where);


@@ -37,7 +37,7 @@ static int i915_gem_active_info(char *buf, char **start, off_t offset,
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
@@ -76,7 +76,7 @@ static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
@@ -114,7 +114,7 @@ static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
@@ -152,7 +152,7 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
int len = 0;
@@ -183,7 +183,7 @@ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
@@ -209,7 +209,7 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {


@@ -86,7 +86,7 @@
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *bridge;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -115,7 +115,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
* since the bridge would only ever use standard BARs 0-1 (though it
* doesn't anyway)
*/
ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
ret = pci_read_base(bridge, pci_bar_mem64, &bridge->resource[2],
mchbar_offset);
if (ret != 0) {
DRM_ERROR("pci_read_base failed: %d\n", ret);
return;
@@ -233,7 +234,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
@@ -275,7 +276,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;

1
linux-core/i915_init.c Symbolic link

@@ -0,0 +1 @@
../shared-core/i915_init.c


@@ -183,73 +183,12 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
DRM_IOCTL_I915_ALLOC, (unsigned long) request);
}
typedef struct drm_i915_execbuffer32 {
uint64_t ops_list;
uint32_t num_buffers;
struct _drm_i915_batchbuffer32 batch;
drm_context_t context;
struct drm_fence_arg fence_arg;
} drm_i915_execbuffer32_t;
static int compat_i915_execbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_execbuffer32_t req32;
struct drm_i915_execbuffer __user *request;
int err;
if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.ops_list, &request->ops_list)
|| __put_user(req32.num_buffers, &request->num_buffers)
|| __put_user(req32.context, &request->context)
|| __copy_to_user(&request->fence_arg, &req32.fence_arg,
sizeof(req32.fence_arg))
|| __put_user(req32.batch.start, &request->batch.start)
|| __put_user(req32.batch.used, &request->batch.used)
|| __put_user(req32.batch.DR1, &request->batch.DR1)
|| __put_user(req32.batch.DR4, &request->batch.DR4)
|| __put_user(req32.batch.num_cliprects,
&request->batch.num_cliprects)
|| __put_user((int __user *)(unsigned long)req32.batch.cliprects,
&request->batch.cliprects))
return -EFAULT;
err = drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
if (err)
return err;
if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
|| __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
|| __get_user(req32.fence_arg.type, &request->fence_arg.type)
|| __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
|| __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
|| __get_user(req32.fence_arg.error, &request->fence_arg.error)
|| __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
return -EFAULT;
if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
return -EFAULT;
return 0;
}
drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
[DRM_I915_ALLOC] = compat_i915_alloc,
#ifdef I915_HAVE_BUFFER
[DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
#endif
};
/**

linux-core/intel_bios.c Normal file

@ -0,0 +1,242 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_bios.h"
#if 0
unsigned char *
i830_bios_get_aim_data_block (ScrnInfoPtr pScrn, int aim, int data_block)
{
unsigned char *bios;
int bdb_off;
int vbt_off;
int aim_off;
struct vbt_header *vbt;
struct aimdb_header *aimdb;
struct aimdb_block *aimdb_block;
bios = i830_bios_get (pScrn);
if (!bios)
return NULL;
vbt_off = INTEL_BIOS_16(0x1a);
vbt = (struct vbt_header *)(bios + vbt_off);
aim_off = vbt->aim_offset[aim];
if (!aim_off)
{
free (bios);
return NULL;
}
xf86DrvMsg(pScrn->scrnIndex, X_INFO, "aim_off %d\n", aim_off);
aimdb = (struct aimdb_header *) (bios + vbt_off + aim_off);
bdb_off = aimdb->aimdb_header_size;
while (bdb_off < aimdb->aimdb_size)
{
aimdb_block = (struct aimdb_block *) (bios + vbt_off + aim_off + bdb_off);
if (aimdb_block->aimdb_id == data_block)
{
unsigned char *aim = malloc (aimdb_block->aimdb_size + sizeof (struct aimdb_block));
if (!aim)
{
free (bios);
return NULL;
}
memcpy (aim, aimdb_block, aimdb_block->aimdb_size + sizeof (struct aimdb_block));
free (bios);
return aim;
}
bdb_off += aimdb_block->aimdb_size + sizeof (struct aimdb_block);
}
free (bios);
return NULL;
}
#endif
static void *
find_section(struct bdb_header *bdb, int section_id)
{
u8 *base = (u8 *)bdb;
int index = 0;
u16 total, current_size;
u8 current_id;
/* skip to first section */
index += bdb->header_size;
total = bdb->bdb_size;
/* walk the sections looking for section_id */
while (index < total) {
current_id = *(base + index);
index++;
current_size = *((u16 *)(base + index));
index += 2;
if (current_id == section_id)
return base + index;
index += current_size;
}
return NULL;
}
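find_section() above relies on the block layout documented in intel_bios.h: after the BDB header, each block starts with a one-byte ID followed by a little-endian 16-bit size, then the payload. A minimal standalone userspace sketch of that walk over a synthetic buffer (the header size, block IDs 40/42 and payload bytes below are invented for illustration, though 40 and 42 happen to match BDB_LVDS_OPTIONS and BDB_LVDS_LFP_DATA):

#include <stdint.h>
#include <stdio.h>

/* Walk a BDB-style block list: [u8 id][u16 size, little-endian][payload], repeated. */
static const uint8_t *find_block(const uint8_t *bdb, uint16_t header_size,
                                 uint16_t total, uint8_t wanted)
{
	uint16_t index = header_size;		/* skip the header, as find_section() does */

	while (index + 3 <= total) {
		uint8_t id = bdb[index++];
		uint16_t size = (uint16_t)(bdb[index] | (bdb[index + 1] << 8));
		index += 2;
		if (id == wanted)
			return bdb + index;	/* payload starts after the 3-byte prefix */
		index += size;
	}
	return NULL;
}

int main(void)
{
	/* Synthetic stream: 4-byte header, block 40 (2-byte payload), block 42 (1-byte payload). */
	uint8_t buf[] = { 0, 0, 0, 0,
			  40, 2, 0, 0xaa, 0xbb,
			  42, 1, 0, 0xcc };
	const uint8_t *payload = find_block(buf, 4, sizeof(buf), 42);

	printf("block 42 payload byte: 0x%02x\n", payload ? payload[0] : 0);
	return 0;
}

Running this prints 0xcc, the payload of the second block, mirroring how the driver locates the LVDS blocks inside the BDB.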
/* Try to find panel data */
static void
parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_lvds_options *lvds_options;
struct bdb_lvds_lfp_data *lvds_lfp_data;
struct bdb_lvds_lfp_data_entry *entry;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
dev_priv->lvds_vbt = 0;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
return;
dev_priv->lvds_vbt = 1;
entry = &lvds_lfp_data->data[lvds_options->panel_type];
dvo_timing = &entry->dvo_timing;
panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
DRM_MEM_DRIVER);
panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
dvo_timing->hactive_lo;
panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
dvo_timing->hsync_pulse_width;
panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
dvo_timing->vactive_lo;
panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
dvo_timing->vsync_off;
panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
dvo_timing->vsync_pulse_width;
panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(panel_fixed_mode);
dev_priv->vbt_mode = panel_fixed_mode;
DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
return;
}
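The arithmetic above stitches the split hi/lo fields of struct lvds_dvo_timing back together and converts the clock, which the VBT stores in 10 kHz units, into the kHz value drm_display_mode expects. A worked standalone example with made-up bytes for a 1280x800 panel at 71.00 MHz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical DVO timing bytes (values invented for the example). */
	uint8_t hactive_hi = 0x05, hactive_lo = 0x00;	/* 0x500 = 1280 */
	uint8_t vactive_hi = 0x03, vactive_lo = 0x20;	/* 0x320 = 800  */
	uint16_t clock_10khz = 7100;			/* 71.00 MHz    */

	int hdisplay = (hactive_hi << 8) | hactive_lo;
	int vdisplay = (vactive_hi << 8) | vactive_lo;
	int clock_khz = clock_10khz * 10;		/* drm_display_mode.clock is in kHz */

	printf("%dx%d @ %d kHz\n", hdisplay, vdisplay, clock_khz);
	return 0;
}

This prints "1280x800 @ 71000 kHz", the same combination parse_panel_data() performs when filling in panel_fixed_mode.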
static void
parse_general_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_features *general;
/* Set sensible defaults in case we can't find the general block */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
}
}
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
*
* VBT existence is a sanity check that is relied on by other i830_bios.c code.
* Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
* feed an updated VBT back through that, compared to what we'll fetch using
* this method of groping around in the BIOS data.
*
* Returns 0 on success, nonzero on failure.
*/
bool
intel_init_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
struct vbt_header *vbt = NULL;
struct bdb_header *bdb;
u8 __iomem *bios;
size_t size;
int i;
bios = pci_map_rom(pdev, &size);
if (!bios)
return -1;
/* Scour memory looking for the VBT signature */
for (i = 0; i + 4 < size; i++) {
if (!memcmp(bios + i, "$VBT", 4)) {
vbt = (struct vbt_header *)(bios + i);
break;
}
}
if (!vbt) {
DRM_ERROR("VBT signature missing\n");
pci_unmap_rom(pdev, bios);
return -1;
}
bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
parse_panel_data(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
return 0;
}
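As the comment block before intel_init_bios() describes, the ROM scan simply looks for the literal "$VBT" marker anywhere in the mapped BIOS image and then applies bdb_offset relative to that header. A short standalone sketch of the same search over an in-memory buffer (the buffer contents are fabricated):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Fake BIOS image with a "$VBT" marker buried at offset 6. */
	const unsigned char bios[] = "\x55\xaa\x00\x00\x00\x00$VBT rest-of-header...";
	size_t size = sizeof(bios) - 1;
	size_t i;
	const unsigned char *vbt = NULL;

	for (i = 0; i + 4 < size; i++) {
		if (!memcmp(bios + i, "$VBT", 4)) {
			vbt = bios + i;
			break;
		}
	}

	if (vbt)
		printf("VBT signature found at offset %zu\n", (size_t)(vbt - bios));
	else
		printf("VBT signature missing\n");
	return 0;
}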

linux-core/intel_bios.h Normal file

@ -0,0 +1,405 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
#include "drmP.h"
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 vbt_size; /**< in bytes */
u8 vbt_checksum;
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
} __attribute__((packed));
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
};
/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
u8 type; /* 0 == desktop, 1 == mobile */
u8 relstage;
u8 chipset;
u8 lvds_present:1;
u8 tv_present:1;
u8 rsvd2:6; /* finish byte */
u8 rsvd3[4];
u8 signon[155];
u8 copyright[61];
u16 code_segment;
u8 dos_boot_mode;
u8 bandwidth_percent;
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
} __attribute__((packed));
/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
*/
#define BDB_GENERAL_FEATURES 1
#define BDB_GENERAL_DEFINITIONS 2
#define BDB_OLD_TOGGLE_LIST 3
#define BDB_MODE_SUPPORT_LIST 4
#define BDB_GENERIC_MODE_TABLE 5
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
#define BDB_DOT_CLOCK_TABLE 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
#define BDB_DRIVER_PERSISTENCE 13
#define BDB_EXT_TABLE_PTRS 14
#define BDB_DOT_CLOCK_OVERRIDE 15
#define BDB_DISPLAY_SELECT 16
/* 17 rsvd */
#define BDB_DRIVER_ROTATION 18
#define BDB_DISPLAY_REMOVE 19
#define BDB_OEM_CUSTOM 20
#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
#define BDB_SDVO_LVDS_OPTIONS 22
#define BDB_SDVO_PANEL_DTDS 23
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
/* bits 1 */
u8 panel_fitting:2;
u8 flexaim:1;
u8 msg_enable:1;
u8 clear_screen:3;
u8 color_flip:1;
/* bits 2 */
u8 download_ext_vbt:1;
u8 enable_ssc:1;
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
u8 rsvd8:3; /* finish byte */
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
u8 rsvd9:6; /* finish byte */
/* bits 4 */
u8 legacy_monitor_detect;
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 rsvd11:6; /* finish byte */
} __attribute__((packed));
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
/* DPMS bits */
u8 dpms_acpi:1;
u8 skip_boot_crt_detect:1;
u8 dpms_aim:1;
u8 rsvd1:5; /* finish byte */
/* boot device bits */
u8 boot_display[2];
u8 child_dev_size;
/* device info */
u8 tv_or_lvds_info[33];
u8 dev1[33];
u8 dev2[33];
u8 dev3[33];
u8 dev4[33];
/* may be another device block here on some platforms */
};
struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
/* LVDS capabilities, stored in a dword */
u8 rsvd2:1;
u8 lvds_edid:1;
u8 pixel_dither:1;
u8 pfit_ratio_auto:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_text_mode_enhanced:1;
u8 pfit_mode:2;
u8 rsvd4;
} __attribute__((packed));
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
u16 fp_timing_offset; /* offsets are from start of bdb */
u8 fp_table_size;
u16 dvo_timing_offset;
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
} __attribute__((packed));
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
} __attribute__((packed));
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
u16 x_res;
u16 y_res;
u32 lvds_reg;
u32 lvds_reg_val;
u32 pp_on_reg;
u32 pp_on_reg_val;
u32 pp_off_reg;
u32 pp_off_reg_val;
u32 pp_cycle_reg;
u32 pp_cycle_reg_val;
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
} __attribute__((packed));
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
u8 hactive_lo;
u8 hblank_lo;
u8 hblank_hi:4;
u8 hactive_hi:4;
u8 vactive_lo;
u8 vblank_lo;
u8 vblank_hi:4;
u8 vactive_hi:4;
u8 hsync_off_lo;
u8 hsync_pulse_width;
u8 vsync_pulse_width:4;
u8 vsync_off:4;
u8 rsvd0:6;
u8 hsync_off_hi:2;
u8 h_image;
u8 v_image;
u8 max_hv;
u8 h_border;
u8 v_border;
u8 rsvd1:3;
u8 digital:2;
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
} __attribute__((packed));
struct lvds_pnp_id {
u16 mfg_name;
u16 product_code;
u32 serial;
u8 mfg_week;
u8 mfg_year;
} __attribute__((packed));
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
} __attribute__((packed));
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
} __attribute__((packed));
struct aimdb_header {
char signature[16];
char oem_device[20];
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
} __attribute__((packed));
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
} __attribute__((packed));
struct vch_panel_data {
u16 fp_timing_offset;
u8 fp_timing_size;
u16 dvo_timing_offset;
u8 dvo_timing_size;
u16 text_fitting_offset;
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
} __attribute__((packed));
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
} __attribute__((packed));
bool intel_init_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
* GR18 & SWF*.
*/
/* GR18 bits are set on display switch and hotkey events */
#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
#define GR18_HK_NONE (0x0<<3)
#define GR18_HK_LFP_STRETCH (0x1<<3)
#define GR18_HK_TOGGLE_DISP (0x2<<3)
#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
#define GR18_HK_POPUP_DISABLED (0x6<<3)
#define GR18_HK_POPUP_ENABLED (0x7<<3)
#define GR18_HK_PFIT (0x8<<3)
#define GR18_HK_APM_CHANGE (0xa<<3)
#define GR18_HK_MULTIPLE (0xc<<3)
#define GR18_USER_INT_EN (1<<2)
#define GR18_A0000_FLUSH_EN (1<<1)
#define GR18_SMM_EN (1<<0)
/* Set by driver, cleared by VBIOS */
#define SWF00_YRES_SHIFT 16
#define SWF00_XRES_SHIFT 0
#define SWF00_RES_MASK 0xffff
/* Set by VBIOS at boot time and driver at runtime */
#define SWF01_TV2_FORMAT_SHIFT 8
#define SWF01_TV1_FORMAT_SHIFT 0
#define SWF01_TV_FORMAT_MASK 0xffff
#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
#define SWF10_GTT_OVERRIDE_EN (1<<28)
#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
#define SWF10_OLD_TOGGLE 0x0
#define SWF10_TOGGLE_LIST_1 0x1
#define SWF10_TOGGLE_LIST_2 0x2
#define SWF10_TOGGLE_LIST_3 0x3
#define SWF10_TOGGLE_LIST_4 0x4
#define SWF10_PANNING_EN (1<<23)
#define SWF10_DRIVER_LOADED (1<<22)
#define SWF10_EXTENDED_DESKTOP (1<<21)
#define SWF10_EXCLUSIVE_MODE (1<<20)
#define SWF10_OVERLAY_EN (1<<19)
#define SWF10_PLANEB_HOLDOFF (1<<18)
#define SWF10_PLANEA_HOLDOFF (1<<17)
#define SWF10_VGA_HOLDOFF (1<<16)
#define SWF10_ACTIVE_DISP_MASK 0xffff
#define SWF10_PIPEB_LFP2 (1<<15)
#define SWF10_PIPEB_EFP2 (1<<14)
#define SWF10_PIPEB_TV2 (1<<13)
#define SWF10_PIPEB_CRT2 (1<<12)
#define SWF10_PIPEB_LFP (1<<11)
#define SWF10_PIPEB_EFP (1<<10)
#define SWF10_PIPEB_TV (1<<9)
#define SWF10_PIPEB_CRT (1<<8)
#define SWF10_PIPEA_LFP2 (1<<7)
#define SWF10_PIPEA_EFP2 (1<<6)
#define SWF10_PIPEA_TV2 (1<<5)
#define SWF10_PIPEA_CRT2 (1<<4)
#define SWF10_PIPEA_LFP (1<<3)
#define SWF10_PIPEA_EFP (1<<2)
#define SWF10_PIPEA_TV (1<<1)
#define SWF10_PIPEA_CRT (1<<0)
#define SWF11_MEMORY_SIZE_SHIFT 16
#define SWF11_SV_TEST_EN (1<<15)
#define SWF11_IS_AGP (1<<14)
#define SWF11_DISPLAY_HOLDOFF (1<<13)
#define SWF11_DPMS_REDUCED (1<<12)
#define SWF11_IS_VBE_MODE (1<<11)
#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
#define SWF11_DPMS_MASK 0x07
#define SWF11_DPMS_OFF (1<<2)
#define SWF11_DPMS_SUSPEND (1<<1)
#define SWF11_DPMS_STANDBY (1<<0)
#define SWF11_DPMS_ON 0
#define SWF14_GFX_PFIT_EN (1<<31)
#define SWF14_TEXT_PFIT_EN (1<<30)
#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
#define SWF14_POPUP_EN (1<<28)
#define SWF14_DISPLAY_HOLDOFF (1<<27)
#define SWF14_DISP_DETECT_EN (1<<26)
#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
#define SWF14_DRIVER_STATUS (1<<24)
#define SWF14_OS_TYPE_WIN9X (1<<23)
#define SWF14_OS_TYPE_WINNT (1<<22)
/* 21:19 rsvd */
#define SWF14_PM_TYPE_MASK 0x00070000
#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
#define SWF14_PM_ACPI (0x3 << 16)
#define SWF14_PM_APM_12 (0x2 << 16)
#define SWF14_PM_APM_11 (0x1 << 16)
#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
/* if GR18 indicates a display switch */
#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
#define SWF14_DS_PIPEB_TV2_EN (1<<13)
#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
#define SWF14_DS_PIPEB_LFP_EN (1<<11)
#define SWF14_DS_PIPEB_EFP_EN (1<<10)
#define SWF14_DS_PIPEB_TV_EN (1<<9)
#define SWF14_DS_PIPEB_CRT_EN (1<<8)
#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
#define SWF14_DS_PIPEA_TV2_EN (1<<5)
#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
#define SWF14_DS_PIPEA_LFP_EN (1<<3)
#define SWF14_DS_PIPEA_EFP_EN (1<<2)
#define SWF14_DS_PIPEA_TV_EN (1<<1)
#define SWF14_DS_PIPEA_CRT_EN (1<<0)
/* if GR18 indicates a panel fitting request */
#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
/* if GR18 indicates an APM change request */
#define SWF14_APM_HIBERNATE 0x4
#define SWF14_APM_SUSPEND 0x3
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
#endif /* _I830_BIOS_H_ */

linux-core/intel_crt.c Normal file

@ -0,0 +1,296 @@
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
temp = I915_READ(ADPA);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
switch(mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_SUSPEND:
temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_OFF:
temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
}
I915_WRITE(ADPA, temp);
}
static void intel_crt_save(struct drm_connector *connector)
{
}
static void intel_crt_restore(struct drm_connector *connector)
{
}
static int intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock > 400000 || mode->clock < 25000)
return MODE_CLOCK_RANGE;
return MODE_OK;
}
static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
if (intel_crtc->pipe == 0)
dpll_md_reg = DPLL_A_MD;
else
dpll_md_reg = DPLL_B_MD;
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
if (IS_I965G(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
}
adpa = 0;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
I915_WRITE(ADPA, adpa);
}
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
* Not for i915G/i915GM
*
* \return true if CRT is connected.
* \return false if CRT is disconnected.
*/
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
temp = I915_READ(PORT_HOTPLUG_EN);
I915_WRITE(PORT_HOTPLUG_EN,
temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
do {
if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
break;
msleep(1);
} while (time_after(timeout, jiffies));
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
CRT_HOTPLUG_MONITOR_COLOR)
return true;
return false;
}
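The hotplug probe above follows a common pattern: set a force-detect bit, poll until the hardware clears it (or a timeout expires), then read the monitor bits from the status register. A userspace simulation of that loop, with a fake register, invented bit positions, and a bounded poll count standing in for jiffies:

#include <stdio.h>

#define FORCE_DETECT	(1u << 3)	/* hypothetical bit positions, not the real ones */
#define MONITOR_COLOR	(3u << 8)

static unsigned int hotplug_reg;

/* Pretend the hardware finishes the detect cycle after a few polls. */
static unsigned int read_hotplug(int poll)
{
	if (poll >= 3)
		hotplug_reg = (hotplug_reg & ~FORCE_DETECT) | MONITOR_COLOR;
	return hotplug_reg;
}

int main(void)
{
	int poll, connected = 0;

	hotplug_reg = FORCE_DETECT;		/* kick off the detect cycle */
	for (poll = 0; poll < 1000; poll++) {	/* bounded wait instead of a jiffies timeout */
		if (!(read_hotplug(poll) & FORCE_DETECT)) {
			connected = (hotplug_reg & MONITOR_COLOR) == MONITOR_COLOR;
			break;
		}
	}
	printf("CRT %s\n", connected ? "connected" : "disconnected");
	return 0;
}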
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
/* CRT should always be at 0, but check anyway */
if (intel_output->type != INTEL_OUTPUT_ANALOG)
return false;
return intel_ddc_probe(intel_output);
}
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
if (intel_crt_detect_hotplug(connector))
return connector_status_connected;
else
return connector_status_disconnected;
}
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
/* TODO use load detect */
return connector_status_unknown;
}
static void intel_crt_destroy(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
intel_i2c_destroy(intel_output->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
return intel_ddc_get_modes(intel_output);
}
static int intel_crt_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.dpms_property && connector->encoder)
intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
return 0;
}
/*
* Routines for controlling stuff on the analog port
*/
static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
.dpms = intel_crt_dpms,
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.save = intel_crt_save,
.restore = intel_crt_restore,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
.best_encoder = intel_best_encoder,
};
void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_crt_enc_destroy,
};
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_output *intel_output;
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
return;
connector = &intel_output->base;
drm_connector_init(dev, &intel_output->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
drm_mode_connector_attach_encoder(&intel_output->base,
&intel_output->enc);
/* Set up the DDC bus. */
intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
if (!intel_output->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
return;
}
intel_output->type = INTEL_OUTPUT_ANALOG;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
}

linux-core/intel_display.c Normal file

File diff suppressed because it is too large

linux-core/intel_drv.h Normal file

@ -0,0 +1,125 @@
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*/
#ifndef __INTEL_DRV_H__
#define __INTEL_DRV_H__
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
/*
* Display related stuff
*/
/* store information about an Ixxx DVO */
/* The i830->i865 use multiple DVOs with multiple i2cs */
/* the i915, i945 have a single sDVO i2c bus - which is different */
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
#define INTELFB_CONN_LIMIT 4
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
#define INTEL_OUTPUT_UNUSED 0
#define INTEL_OUTPUT_ANALOG 1
#define INTEL_OUTPUT_DVO 2
#define INTEL_OUTPUT_SDVO 3
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
struct intel_i2c_chan {
struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
u32 reg; /* GPIO reg */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
u8 slave_addr;
};
struct intel_framebuffer {
struct drm_framebuffer base;
};
struct intel_output {
struct drm_connector base;
struct drm_encoder enc;
int type;
struct intel_i2c_chan *i2c_bus; /* for control functions */
struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
bool load_detect_temp;
void *dev_priv;
};
struct intel_crtc {
struct drm_crtc base;
int pipe;
int plane;
uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
struct intel_framebuffer *fbdev_fb;
/* a mode_set for fbdev users on this crtc */
struct drm_mode_set mode_set;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_output(x) container_of(x, struct intel_output, base)
#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct intel_i2c_chan *chan);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
struct drm_display_mode *mode,
int *dpms_mode);
extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern int intelfb_probe(struct drm_device *dev);
extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
void *mm_private);
#endif /* __INTEL_DRV_H__ */

linux-core/intel_dvo.c Normal file

@ -0,0 +1,507 @@
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
/*
* Copyright 2006 Dave Airlie <airlied@linux.ie>
*/
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "dvo.h"
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
extern struct intel_dvo_dev_ops sil164_ops;
extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
.dvo_reg = DVOC,
.slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.dvo_reg = DVOA,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
.dvo_reg = DVOC,
.slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
.gpio = GPIOE,
.dev_ops = &ch7017_ops,
}
};
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_output *intel_output = enc_to_intel_output(encoder);
struct intel_dvo_device *dvo = intel_output->dev_priv;
u32 dvo_reg = dvo->dvo_reg;
u32 temp = I915_READ(dvo_reg);
if (mode == DRM_MODE_DPMS_ON) {
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
dvo->dev_ops->dpms(dvo, mode);
} else {
dvo->dev_ops->dpms(dvo, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
}
static void intel_dvo_save(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
/* Each output should probably just save the registers it touches,
* but for now, use more overkill.
*/
dev_priv->saveDVOA = I915_READ(DVOA);
dev_priv->saveDVOB = I915_READ(DVOB);
dev_priv->saveDVOC = I915_READ(DVOC);
dvo->dev_ops->save(dvo);
}
static void intel_dvo_restore(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
dvo->dev_ops->restore(dvo);
I915_WRITE(DVOA, dev_priv->saveDVOA);
I915_WRITE(DVOB, dev_priv->saveDVOB);
I915_WRITE(DVOC, dev_priv->saveDVOC);
}
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
if (dvo->panel_fixed_mode) {
if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
return dvo->dev_ops->mode_valid(dvo, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_output *intel_output = enc_to_intel_output(encoder);
struct intel_dvo_device *dvo = intel_output->dev_priv;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
if (dvo->panel_fixed_mode != NULL) {
#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
C(htotal);
C(vdisplay);
C(vsync_start);
C(vsync_end);
C(vtotal);
C(clock);
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
}
if (dvo->dev_ops->mode_fixup)
return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
return true;
}
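For clarity, the C(x) macro used above copies one timing field at a time from the fixed panel mode into the adjusted mode; C(hdisplay); for example expands to the statement:

	adjusted_mode->hdisplay = dvo->panel_fixed_mode->hdisplay;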
static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_output *intel_output = enc_to_intel_output(encoder);
struct intel_dvo_device *dvo = intel_output->dev_priv;
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
switch (dvo_reg) {
case DVOA:
default:
dvo_srcdim_reg = DVOA_SRCDIM;
break;
case DVOB:
dvo_srcdim_reg = DVOB_SRCDIM;
break;
case DVOC:
dvo_srcdim_reg = DVOC_SRCDIM;
break;
}
dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
if (pipe == 1)
dvo_val |= DVO_PIPE_B_SELECT;
dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
I915_WRITE(dvo_srcdim_reg,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
/*I915_WRITE(DVOB, dvo_val);*/
I915_WRITE(dvo_reg, dvo_val);
}
/**
* Detect the output connection on our DVO device.
*
* Unimplemented.
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
return dvo->dev_ops->detect(dvo);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
intel_ddc_get_modes(intel_output);
if (!list_empty(&connector->probed_modes))
return 1;
#if 0
if (intel_output->i2c_drv->vid_rec->get_modes)
{
modes = intel_output->i2c_drv->vid_rec->get_modes (intel_output->i2c_drv->dev_priv);
if (modes != NULL)
return modes;
}
#endif
if (dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
return 0;
}
static void intel_dvo_destroy (struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
if (dvo) {
if (dvo->dev_ops->destroy)
dvo->dev_ops->destroy(dvo);
if (dvo->panel_fixed_mode)
kfree(dvo->panel_fixed_mode);
/* no need, in i830_dvoices[] now */
//kfree(dvo);
}
if (intel_output->i2c_bus)
intel_i2c_destroy(intel_output->i2c_bus);
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_output);
}
#ifdef RANDR_GET_CRTC_INTERFACE
static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
return intel_pipe_to_crtc(pScrn, pipe);
}
#endif
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
.prepare = intel_encoder_prepare,
.mode_set = intel_dvo_mode_set,
.commit = intel_encoder_commit,
};
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.save = intel_dvo_save,
.restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
.best_encoder = intel_best_encoder,
};
void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
* Other chips with DVO LVDS will need to extend this to deal with the LVDS
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dvo_device *dvo = intel_output->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
struct drm_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
}
}
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct intel_output *intel_output;
struct intel_dvo_device *dvo;
struct intel_i2c_chan *i2cbus = NULL;
int ret = 0;
int i;
int gpio_inited = 0;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
return;
/* Set up the DDC bus */
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_output->ddc_bus)
goto free_intel;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_output->base;
int gpio;
dvo = &intel_dvo_devices[i];
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
if (dvo->gpio != 0)
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GPIOB;
else
gpio = GPIOE;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
if (gpio_inited != gpio) {
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
if (!(i2cbus = intel_i2c_create(dev, gpio,
gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
continue;
}
gpio_inited = gpio;
}
if (dvo->dev_ops!= NULL)
ret = dvo->dev_ops->init(dvo, i2cbus);
else
ret = false;
if (!ret)
continue;
intel_output->type = INTEL_OUTPUT_DVO;
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
// connector = DRM_MODE_CONNECTOR_DVID;
drm_connector_init(dev, connector, &intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
// connector = DRM_MODE_CONNECTOR_LVDS;
drm_connector_init(dev, connector, &intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
encoder_type = DRM_MODE_ENCODER_LVDS;
break;
}
drm_connector_helper_add(connector, &intel_dvo_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
intel_output->dev_priv = dvo;
intel_output->i2c_bus = i2cbus;
drm_encoder_init(dev, &intel_output->enc, &intel_dvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_output->enc, &intel_dvo_helper_funcs);
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
* However, it's in a different format from the BIOS
* data on chipsets with integrated LVDS (stored in AIM
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
dvo->panel_fixed_mode = intel_dvo_get_current_mode(connector);
dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
return;
}
intel_i2c_destroy(intel_output->ddc_bus);
/* Didn't find a chip, so tear down. */
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_output);
}

linux-core/intel_fb.c Normal file

File diff suppressed because it is too large

linux-core/intel_i2c.c Normal file

@ -0,0 +1,190 @@
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
/*
* Intel GPIO access functions
*/
#define I2C_RISEFALL_TIME 20
static int get_clock(void *data)
{
struct intel_i2c_chan *chan = data;
struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 val;
val = I915_READ(chan->reg);
return ((val & GPIO_CLOCK_VAL_IN) != 0);
}
static int get_data(void *data)
{
struct intel_i2c_chan *chan = data;
struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 val;
val = I915_READ(chan->reg);
return ((val & GPIO_DATA_VAL_IN) != 0);
}
static void set_clock(void *data, int state_high)
{
struct intel_i2c_chan *chan = data;
struct drm_device *dev = chan->drm_dev;
struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 reserved = 0, clock_bits;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
if (state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
I915_WRITE(chan->reg, reserved | clock_bits);
udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
}
static void set_data(void *data, int state_high)
{
struct intel_i2c_chan *chan = data;
struct drm_device *dev = chan->drm_dev;
struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 reserved = 0, data_bits;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
if (state_high)
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
else
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
I915_WRITE(chan->reg, reserved | data_bits);
udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
}
/**
* intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
* @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
*
* Creates and registers a new i2c bus with the Linux i2c layer, for use
* in output probing and control (e.g. DDC or SDVO control functions).
*
* Possible values for @reg include:
* %GPIOA
* %GPIOB
* %GPIOC
* %GPIOD
* %GPIOE
* %GPIOF
* %GPIOG
* %GPIOH
* see PRM for details on how these different busses are used.
*/
struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name)
{
struct intel_i2c_chan *chan;
chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
if (!chan)
goto out_free;
chan->drm_dev = dev;
chan->reg = reg;
snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
chan->adapter.owner = THIS_MODULE;
#ifndef I2C_HW_B_INTELFB
#define I2C_HW_B_INTELFB I2C_HW_B_I810
#endif
chan->adapter.id = I2C_HW_B_INTELFB;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = &dev->pdev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
chan->algo.getscl = get_clock;
chan->algo.udelay = 20;
chan->algo.timeout = usecs_to_jiffies(2200);
chan->algo.data = chan;
i2c_set_adapdata(&chan->adapter, chan);
if(i2c_bit_add_bus(&chan->adapter))
goto out_free;
/* JJJ: raise SCL and SDA? */
set_data(chan, 1);
set_clock(chan, 1);
udelay(20);
return chan;
out_free:
kfree(chan);
return NULL;
}
/**
* intel_i2c_destroy - unregister and free i2c bus resources
* @output: channel to free
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
void intel_i2c_destroy(struct intel_i2c_chan *chan)
{
if (!chan)
return;
i2c_del_adapter(&chan->adapter);
kfree(chan);
}
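Typical usage of this pair mirrors the connector init paths elsewhere in this series: create a bit-banged bus on the GPIO register that carries the DDC lines, probe or fetch EDID over it, and destroy it when the connector goes away. A condensed sketch in the driver's style (not compilable on its own; it assumes dev and intel_output are in scope and uses only functions introduced in this diff):

	/* Sketch, in the style of intel_crt_init(): set up DDC, read modes, tear down. */
	intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
	if (!intel_output->ddc_bus)
		return;					/* i2c registration failed */

	if (intel_ddc_probe(intel_output))		/* anything answering on the DDC address? */
		intel_ddc_get_modes(intel_output);	/* pull EDID modes over the new bus */

	intel_i2c_destroy(intel_output->ddc_bus);	/* unregister the adapter and free it */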

linux-core/intel_lvds.c Normal file

@ -0,0 +1,515 @@
/*
* Copyright © 2006-2007 Intel Corporation
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
/**
* Sets the backlight level.
*
* \param level backlight level, from 0 to intel_lvds_get_max_backlight().
*/
static void intel_lvds_set_backlight(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
}
/**
* Returns the maximum level of the backlight duty cycle field.
*/
static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
}
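These two helpers express brightness as a duty cycle out of a hardware-defined maximum, so callers in this file scale against intel_lvds_get_max_backlight() rather than hard-coding absolute values. A minimal fragment showing how, say, half brightness would be requested (assumes a dev pointer in scope):

	/* Sketch: set the panel to roughly 50% brightness. */
	u32 max = intel_lvds_get_max_backlight(dev);

	intel_lvds_set_backlight(dev, max / 2);	/* valid levels run from 0 to max */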
/**
* Sets the power state for the panel.
*/
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_status;
if (on) {
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
POWER_TARGET_ON);
do {
pp_status = I915_READ(PP_STATUS);
} while ((pp_status & PP_ON) == 0);
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
intel_lvds_set_backlight(dev, 0);
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
~POWER_TARGET_ON);
do {
pp_status = I915_READ(PP_STATUS);
} while (pp_status & PP_ON);
}
}
static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
if (mode == DRM_MODE_DPMS_ON)
intel_lvds_set_power(dev, true);
else
intel_lvds_set_power(dev, false);
/* XXX: We never power down the LVDS pairs. */
}
static void intel_lvds_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
/*
* If the light is off at server startup, just make it full brightness
*/
if (dev_priv->backlight_duty_cycle == 0)
dev_priv->backlight_duty_cycle =
intel_lvds_get_max_backlight(dev);
}
static void intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
intel_lvds_set_power(dev, true);
else
intel_lvds_set_power(dev, false);
}
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
printk(KERN_ERR "Can't support LVDS on pipe A\n");
return false;
}
/* Should never happen!! */
list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
printk(KERN_ERR "Can't enable LVDS and another "
"encoder on the same pipe\n");
return false;
}
}
/*
* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
if (dev_priv->panel_fixed_mode != NULL) {
adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
adjusted_mode->hsync_start =
dev_priv->panel_fixed_mode->hsync_start;
adjusted_mode->hsync_end =
dev_priv->panel_fixed_mode->hsync_end;
adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
adjusted_mode->vsync_start =
dev_priv->panel_fixed_mode->vsync_start;
adjusted_mode->vsync_end =
dev_priv->panel_fixed_mode->vsync_end;
adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
* user's requested refresh rate.
*/
return true;
}
static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
intel_lvds_set_power(dev, false);
}
static void intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight_duty_cycle == 0)
dev_priv->backlight_duty_cycle =
intel_lvds_get_max_backlight(dev);
intel_lvds_set_power(dev, true);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 pfit_control;
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
/*
* Enable automatic panel scaling so that non-native modes fill the
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay)
pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
else
pfit_control = 0;
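/*
 * Pre-965 hardware has the panel fitter hardwired to pipe B (which is why
 * mode_fixup rejects LVDS on pipe A there); on 965+ the fitter's source
 * pipe has to be selected explicitly below.
 */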
if (!IS_I965G(dev)) {
if (dev_priv->panel_wants_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
else
pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
I915_WRITE(PFIT_CONTROL, pfit_control);
}
/**
* Detect the LVDS connection.
*
* This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have
* been set up if the LVDS was actually connected anyway.
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
return connector_status_connected;
}
/**
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_output *intel_output = to_intel_output(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
ret = intel_ddc_get_modes(intel_output);
if (ret)
return ret;
/* Didn't get an EDID, so set wide sync ranges to ensure all modes
 * get handed to the mode_valid() callback for checking.
 */
connector->display_info.min_vfreq = 0;
connector->display_info.max_vfreq = 200;
connector->display_info.min_hfreq = 0;
connector->display_info.max_hfreq = 200;
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode =
drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
return 0;
}
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
*
* Unregister the DDC bus for this connector then free the driver private
* structure.
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.dpms = intel_lvds_dpms,
.mode_fixup = intel_lvds_mode_fixup,
.prepare = intel_lvds_prepare,
.mode_set = intel_lvds_mode_set,
.commit = intel_lvds_commit,
};
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
.best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.save = intel_lvds_save,
.restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_lvds_destroy,
};
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
.destroy = intel_lvds_enc_destroy,
};
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
u32 lvds;
int pipe;
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output) {
return;
}
connector = &intel_output->base;
encoder = &intel_output->enc;
drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
intel_output->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
/*
* LVDS discovery:
* 1) check for EDID on DDC
* 2) check for VBT data
* 3) check to see if LVDS is already on
* if none of the above, no panel
* 4) make sure lid is open
* if closed, act like it's not there for now
*/
/* Set up the DDC bus. */
intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
if (!intel_output->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
goto failed;
}
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
intel_ddc_get_modes(intel_output);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
goto out; /* FIXME: check for quirks */
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->vbt_mode)
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, dev_priv->vbt_mode);
/*
* If we didn't get EDID, try checking if the panel is already turned
* on. If so, assume that whatever is currently programmed is the
* correct mode.
*/
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
if (dev_priv->panel_fixed_mode) {
dev_priv->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out; /* FIXME: check for quirks */
}
}
/* If we still don't have a mode after all that, give up. */
if (!dev_priv->panel_fixed_mode)
goto failed;
/* FIXME: detect aopen & mac mini type stuff automatically? */
/*
* Blacklist machines with BIOSes that list an LVDS panel without
* actually having one.
*/
if (IS_I945GM(dev)) {
/* aopen mini pc */
if (dev->pdev->subsystem_vendor == 0xa0a0)
goto failed;
if ((dev->pdev->subsystem_vendor == 0x8086) &&
(dev->pdev->subsystem_device == 0x7270)) {
/* It's a Mac Mini or Macbook Pro.
*
* Apple hardware is out to get us. The macbook pro
* has a real LVDS panel, but the mac mini does not,
* and they have the same device IDs. We'll
* distinguish by panel size, on the assumption
* that Apple isn't about to make any machines with an
* 800x600 display.
*/
if (dev_priv->panel_fixed_mode != NULL &&
dev_priv->panel_fixed_mode->hdisplay == 800 &&
dev_priv->panel_fixed_mode->vdisplay == 600) {
DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
goto failed;
}
}
}
out:
drm_sysfs_connector_add(connector);
return;
failed:
DRM_DEBUG("No LVDS modes found, disabling.\n");
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}

61 linux-core/intel_modes.c Normal file
View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/i2c.h>
#include <linux/fb.h>
#include "drmP.h"
#include "intel_drv.h"
/**
 * intel_ddc_probe - check whether a monitor responds on the DDC bus
 *
 * Does a one-byte write followed by a one-byte read at the EDID address
 * (0x50) on @intel_output's DDC bus; returns true if a device answered.
 */
bool intel_ddc_probe(struct intel_output *intel_output)
{
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = 0x50,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
if (ret == 2)
return true;
return false;
}
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
*
* Fetch the EDID information from @connector using the DDC bus.
*/
int intel_ddc_get_modes(struct intel_output *intel_output)
{
struct edid *edid;
int ret = 0;
edid = drm_get_edid(&intel_output->base, &intel_output->ddc_bus->adapter);
if (edid) {
drm_mode_connector_update_edid_property(&intel_output->base, edid);
ret = drm_add_edid_modes(&intel_output->base, edid);
kfree(edid);
}
return ret;
}

1153 linux-core/intel_sdvo.c Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,328 @@
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
/**
* @file SDVO command definitions and structures.
*/
#define SDVO_OUTPUT_FIRST (0)
#define SDVO_OUTPUT_TMDS0 (1 << 0)
#define SDVO_OUTPUT_RGB0 (1 << 1)
#define SDVO_OUTPUT_CVBS0 (1 << 2)
#define SDVO_OUTPUT_SVID0 (1 << 3)
#define SDVO_OUTPUT_YPRPB0 (1 << 4)
#define SDVO_OUTPUT_SCART0 (1 << 5)
#define SDVO_OUTPUT_LVDS0 (1 << 6)
#define SDVO_OUTPUT_TMDS1 (1 << 8)
#define SDVO_OUTPUT_RGB1 (1 << 9)
#define SDVO_OUTPUT_CVBS1 (1 << 10)
#define SDVO_OUTPUT_SVID1 (1 << 11)
#define SDVO_OUTPUT_YPRPB1 (1 << 12)
#define SDVO_OUTPUT_SCART1 (1 << 13)
#define SDVO_OUTPUT_LVDS1 (1 << 14)
#define SDVO_OUTPUT_LAST (14)
struct intel_sdvo_caps {
u8 vendor_id;
u8 device_id;
u8 device_rev_id;
u8 sdvo_version_major;
u8 sdvo_version_minor;
unsigned int sdvo_inputs_mask:2;
unsigned int smooth_scaling:1;
unsigned int sharp_scaling:1;
unsigned int up_scaling:1;
unsigned int down_scaling:1;
unsigned int stall_support:1;
unsigned int pad:1;
u16 output_flags;
} __attribute__((packed));
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
u16 clock; /**< pixel clock, in 10kHz units */
u8 h_active; /**< lower 8 bits (pixels) */
u8 h_blank; /**< lower 8 bits (pixels) */
u8 h_high; /**< upper 4 bits each h_active, h_blank */
u8 v_active; /**< lower 8 bits (lines) */
u8 v_blank; /**< lower 8 bits (lines) */
u8 v_high; /**< upper 4 bits each v_active, v_blank */
} part1;
struct {
u8 h_sync_off; /**< lower 8 bits, from hblank start */
u8 h_sync_width; /**< lower 8 bits (pixels) */
/** lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
/**
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
/** bits 6-7 of vsync offset, stored at bits 6-7 of this byte */
u8 v_sync_off_high;
u8 reserved;
} part2;
} __attribute__((packed));
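/*
 * Illustrative sketch only (not part of the original header): one plausible
 * way a driver could pack a mode's horizontal timings into part1 of the DTD
 * above.  The helper name and parameters are assumptions for illustration.
 */
static inline void example_sdvo_dtd_pack_h(struct intel_sdvo_dtd *dtd,
                                           u16 hdisplay, u16 htotal)
{
        u16 h_blank = htotal - hdisplay;

        dtd->part1.h_active = hdisplay & 0xff;
        dtd->part1.h_blank = h_blank & 0xff;
        /* upper nibble: bits 8-11 of h_active; lower nibble: bits 8-11 of h_blank */
        dtd->part1.h_high = (((hdisplay >> 8) & 0xf) << 4) |
                            ((h_blank >> 8) & 0xf);
}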
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
} __attribute__((packed));
struct intel_sdvo_preferred_input_timing_args {
u16 clock;
u16 width;
u16 height;
} __attribute__((packed));
/* I2C registers for SDVO */
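/* Argument registers are laid out in descending address order: ARG_0 lives at 0x07, ARG_7 at 0x00. */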
#define SDVO_I2C_ARG_0 0x07
#define SDVO_I2C_ARG_1 0x06
#define SDVO_I2C_ARG_2 0x05
#define SDVO_I2C_ARG_3 0x04
#define SDVO_I2C_ARG_4 0x03
#define SDVO_I2C_ARG_5 0x02
#define SDVO_I2C_ARG_6 0x01
#define SDVO_I2C_ARG_7 0x00
#define SDVO_I2C_OPCODE 0x08
#define SDVO_I2C_CMD_STATUS 0x09
#define SDVO_I2C_RETURN_0 0x0a
#define SDVO_I2C_RETURN_1 0x0b
#define SDVO_I2C_RETURN_2 0x0c
#define SDVO_I2C_RETURN_3 0x0d
#define SDVO_I2C_RETURN_4 0x0e
#define SDVO_I2C_RETURN_5 0x0f
#define SDVO_I2C_RETURN_6 0x10
#define SDVO_I2C_RETURN_7 0x11
#define SDVO_I2C_VENDOR_BEGIN 0x20
/* Status results */
#define SDVO_CMD_STATUS_POWER_ON 0x0
#define SDVO_CMD_STATUS_SUCCESS 0x1
#define SDVO_CMD_STATUS_NOTSUPP 0x2
#define SDVO_CMD_STATUS_INVALID_ARG 0x3
#define SDVO_CMD_STATUS_PENDING 0x4
#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
/* SDVO commands, argument/result registers */
#define SDVO_CMD_RESET 0x01
/** Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02
#define SDVO_CMD_GET_FIRMWARE_REV 0x86
# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
/**
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
*/
#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
struct intel_sdvo_get_trained_inputs_response {
unsigned int input0_trained:1;
unsigned int input1_trained:1;
unsigned int pad:6;
} __attribute__((packed));
/** Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
/**
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
* on multi-output devices.
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
/**
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_GET_IN_OUT_MAP 0x06
/**
* Sets the current mapping of SDVO inputs to outputs on the device.
*
* Takes two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07
/**
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
/**
* Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
/**
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
/**
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
struct intel_sdvo_get_interrupt_event_source_response {
u16 interrupt_status;
unsigned int ambient_light_interrupt:1;
unsigned int pad:7;
} __attribute__((packed));
/**
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
* GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
* GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
*/
#define SDVO_CMD_SET_TARGET_INPUT 0x10
struct intel_sdvo_set_target_input_args {
unsigned int target_1:1;
unsigned int pad:7;
} __attribute__((packed));
/**
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
* Affected commands include SET_OUTPUT_TIMINGS_PART[12],
* GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
*/
#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
/* Part 1 */
# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
/* Part 2 */
# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
/**
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
* modes.
*/
#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
/** Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
/** Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
#define SDVO_CMD_GET_TV_FORMAT 0x28
#define SDVO_CMD_SET_TV_FORMAT 0x29
#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
# define SDVO_ENCODER_STATE_ON (1 << 0)
# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
# define SDVO_ENCODER_STATE_OFF (1 << 3)
#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
# define SDVO_CONTROL_BUS_PROM 0x0
# define SDVO_CONTROL_BUS_DDC1 0x1
# define SDVO_CONTROL_BUS_DDC2 0x2
# define SDVO_CONTROL_BUS_DDC3 0x3

1734 linux-core/intel_tv.c Normal file

File diff suppressed because it is too large

854 linux-core/nouveau_bios.c Normal file
View File

@@ -0,0 +1,854 @@
/*
* Copyright (C) 2005-2006 Erik Waling
* Copyright (C) 2006 Stephane Marchesin
* Copyright (C) 2007-2008 Stuart Bennett
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <asm/byteorder.h>
#include "nouveau_bios.h"
#include "nouveau_drv.h"
/* returns true if it mismatches */
static bool nv_checksum(const uint8_t *data, unsigned int length)
{
/* there are a few checksums in the BIOS, so here's a generic checking function */
int i;
uint8_t sum = 0;
for (i = 0; i < length; i++)
sum += data[i];
if (sum)
return true;
return false;
}
static int nv_valid_bios(struct drm_device *dev, uint8_t *data)
{
/* check for BIOS signature */
if (!(data[0] == 0x55 && data[1] == 0xAA)) {
DRM_ERROR("BIOS signature not found.\n");
return 0;
}
if (nv_checksum(data, data[2] * 512)) {
DRM_ERROR("BIOS checksum invalid.\n");
return 1;
}
return 2;
}
static void nv_shadow_bios_rom(struct drm_device *dev, uint8_t *data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
/* enable access to rom */
NV_WRITE(NV04_PBUS_PCI_NV_20, NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED);
/* This is also valid for pre-NV50; it just happens to be the only define already present. */
for (i = 0; i < NV50_PROM__ESIZE; i++) {
/* Apparently needed for a 6600GT/6800LE bug. */
data[i] = DRM_READ8(dev_priv->mmio, NV50_PROM + i);
data[i] = DRM_READ8(dev_priv->mmio, NV50_PROM + i);
data[i] = DRM_READ8(dev_priv->mmio, NV50_PROM + i);
data[i] = DRM_READ8(dev_priv->mmio, NV50_PROM + i);
data[i] = DRM_READ8(dev_priv->mmio, NV50_PROM + i);
}
/* disable access to rom */
NV_WRITE(NV04_PBUS_PCI_NV_20, NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
}
static void nv_shadow_bios_ramin(struct drm_device *dev, uint8_t *data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t old_bar0_pramin = 0;
int i;
/* Move the bios copy to the start of ramin? */
if (dev_priv->card_type >= NV_50) {
uint32_t vbios_vram = (NV_READ(0x619f04) & ~0xff) << 8;
if (!vbios_vram)
vbios_vram = (NV_READ(0x1700) << 16) + 0xf0000;
old_bar0_pramin = NV_READ(0x1700);
NV_WRITE(0x1700, vbios_vram >> 16);
}
for (i = 0; i < NV50_PROM__ESIZE; i++)
data[i] = DRM_READ8(dev_priv->mmio, NV04_PRAMIN + i);
if (dev_priv->card_type >= NV_50)
NV_WRITE(0x1700, old_bar0_pramin);
}
static bool nv_shadow_bios(struct drm_device *dev, uint8_t *data)
{
nv_shadow_bios_rom(dev, data);
if (nv_valid_bios(dev, data) == 2)
return true;
nv_shadow_bios_ramin(dev, data);
if (nv_valid_bios(dev, data))
return true;
return false;
}
struct bit_entry {
uint8_t id[2];
uint16_t length;
uint16_t offset;
};
static int parse_bit_A_tbl_entry(struct drm_device *dev, struct bios *bios, struct bit_entry *bitentry)
{
/* Parses the load detect value table.
*
* Starting at bitentry->offset:
*
* offset + 0 (16 bits): table pointer
*/
uint16_t load_table_pointer;
if (bitentry->length != 3) {
DRM_ERROR("Do not understand BIT loadval table\n");
return 0;
}
load_table_pointer = le16_to_cpu(*((uint16_t *)(&bios->data[bitentry->offset])));
if (load_table_pointer == 0x0) {
DRM_ERROR("Pointer to loadval table invalid\n");
return 0;
}
/* Some kind of signature */
if (bios->data[load_table_pointer] != 16 || bios->data[load_table_pointer + 1] != 4 ||
bios->data[load_table_pointer + 2] != 4 || bios->data[load_table_pointer + 3] != 2)
return 0;
bios->dactestval = le32_to_cpu(*((uint32_t *)&bios->data[load_table_pointer + 4])) & 0x3FF;
return 1;
}
static int parse_bit_C_tbl_entry(struct drm_device *dev, struct bios *bios, struct bit_entry *bitentry)
{
/* offset + 8 (16 bits): PLL limits table pointer
*
* There's more in here, but that's unknown.
*/
if (bitentry->length < 10) {
DRM_ERROR("Do not understand BIT C table\n");
return 0;
}
bios->pll_limit_tbl_ptr = le16_to_cpu(*((uint16_t *)(&bios->data[bitentry->offset + 8])));
return 1;
}
static void parse_bit_structure(struct drm_device *dev, struct bios *bios, const uint16_t bitoffset)
{
int entries = bios->data[bitoffset + 4];
/* parse i first, I next (which needs C & M before it), and L before D */
char parseorder[] = "iCMILDTA";
struct bit_entry bitentry;
int i, j, offset;
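/* Records start at bitoffset + 6 and are 6 bytes each: two id characters,
 * a little-endian 16-bit length and a little-endian 16-bit offset into the
 * BIOS image.
 */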
for (i = 0; i < sizeof(parseorder); i++) {
for (j = 0, offset = bitoffset + 6; j < entries; j++, offset += 6) {
bitentry.id[0] = bios->data[offset];
bitentry.id[1] = bios->data[offset + 1];
bitentry.length = le16_to_cpu(*((uint16_t *)&bios->data[offset + 2]));
bitentry.offset = le16_to_cpu(*((uint16_t *)&bios->data[offset + 4]));
if (bitentry.id[0] != parseorder[i])
continue;
switch (bitentry.id[0]) {
case 'A':
parse_bit_A_tbl_entry(dev, bios, &bitentry);
break;
case 'C':
parse_bit_C_tbl_entry(dev, bios, &bitentry);
break;
}
}
}
}
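/* Naive substring search: returns the offset of the first occurrence of str
 * within the first n bytes of data, or 0 if it is not found.
 */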
static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
{
int i, j;
for (i = 0; i <= (n - len); i++) {
for (j = 0; j < len; j++)
if (data[i + j] != str[j])
break;
if (j == len)
return i;
}
return 0;
}
static void
read_dcb_i2c_entry(struct drm_device *dev, uint8_t dcb_version, uint16_t i2ctabptr, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct bios *bios = &dev_priv->bios;
uint8_t *i2ctable = &bios->data[i2ctabptr];
uint8_t headerlen = 0;
int i2c_entries = MAX_NUM_DCB_ENTRIES;
int recordoffset = 0, rdofs = 1, wrofs = 0;
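/* Each I2C table record is 4 bytes; rdofs/wrofs select which byte of the
 * record holds the read and write addresses.  Byte 3 of a record is the
 * port type (checked below for C51 and 4.0+ tables), with 0xff marking an
 * unused entry.
 */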
if (!i2ctabptr)
return;
if (dcb_version >= 0x30) {
if (i2ctable[0] != dcb_version) /* necessary? */
DRM_ERROR(
"DCB I2C table version mismatch (%02X vs %02X)\n",
i2ctable[0], dcb_version);
headerlen = i2ctable[1];
i2c_entries = i2ctable[2];
/* same address offset used for read and write for C51 and G80 */
if (bios->chip_version == 0x51)
rdofs = wrofs = 1;
if (i2ctable[0] >= 0x40)
rdofs = wrofs = 0;
}
/* it's your own fault if you call this function on a DCB 1.1 BIOS --
* the test below is for DCB 1.2
*/
if (dcb_version < 0x14) {
recordoffset = 2;
rdofs = 0;
wrofs = 1;
}
if (index == 0xf)
return;
if (index > i2c_entries) {
DRM_ERROR(
"DCB I2C index too big (%d > %d)\n",
index, i2c_entries);
return;
}
if (i2ctable[headerlen + 4 * index + 3] == 0xff) {
DRM_ERROR(
"DCB I2C entry invalid\n");
return;
}
if (bios->chip_version == 0x51) {
int port_type = i2ctable[headerlen + 4 * index + 3];
if (port_type != 4)
DRM_ERROR(
"DCB I2C table has port type %d\n", port_type);
}
if (i2ctable[0] >= 0x40) {
int port_type = i2ctable[headerlen + 4 * index + 3];
if (port_type != 5)
DRM_ERROR(
"DCB I2C table has port type %d\n", port_type);
}
dev_priv->dcb_table.i2c_read[index] = i2ctable[headerlen + recordoffset + rdofs + 4 * index];
dev_priv->dcb_table.i2c_write[index] = i2ctable[headerlen + recordoffset + wrofs + 4 * index];
}
static bool
parse_dcb_entry(struct drm_device *dev, int index, uint8_t dcb_version, uint16_t i2ctabptr, uint32_t conn, uint32_t conf)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_entry *entry = &dev_priv->dcb_table.entry[index];
memset(entry, 0, sizeof (struct dcb_entry));
entry->index = index;
/* safe defaults for a crt */
entry->type = 0;
entry->i2c_index = 0;
entry->heads = 1;
entry->bus = 0;
entry->location = LOC_ON_CHIP;
entry->or = 1;
entry->duallink_possible = false;
if (dcb_version >= 0x20) {
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0xf;
entry->or = (conn >> 24) & 0xf;
/* Normal entries consist of a single bit, but dual link has the
* adjacent more significant bit set too
*/
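/* e.g. or == 0x3 (bits 0+1): lowest set bit is 1 and 1 * 3 == 3, so dual
 * link is possible; or == 0x4 (a single bit): 4 * 3 == 12 != 4, so it is not.
 */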
if ((1 << (ffs(entry->or) - 1)) * 3 == entry->or)
entry->duallink_possible = true;
switch (entry->type) {
case DCB_OUTPUT_LVDS:
{
uint32_t mask;
if (conf & 0x1)
entry->lvdsconf.use_straps_for_mode = true;
if (dcb_version < 0x22) {
mask = ~0xd;
/* both 0x4 and 0x8 show up in v2.0 tables; assume they mean
* the same thing, which is probably wrong, but might work */
if (conf & 0x4 || conf & 0x8)
entry->lvdsconf.use_power_scripts = true;
} else {
mask = ~0x5;
if (conf & 0x4)
entry->lvdsconf.use_power_scripts = true;
}
if (conf & mask) {
if (dcb_version < 0x40) { /* we know g80 cards have unknown bits */
DRM_ERROR("Unknown LVDS configuration bits, please report\n");
/* cause output setting to fail, so message is seen */
dev_priv->dcb_table.entries = 0;
return false;
}
}
break;
}
case 0xe:
/* weird type that appears on g80 mobile bios; nv driver treats it as a terminator */
return false;
}
read_dcb_i2c_entry(dev, dcb_version, i2ctabptr, entry->i2c_index);
} else if (dcb_version >= 0x14) {
if (conn != 0xf0003f00 && conn != 0xf2247f10 &&
    conn != 0xf2204001 && conn != 0xf2204301 &&
    conn != 0xf2244311 && conn != 0xf2045f14 &&
    conn != 0xf2205004 && conn != 0xf2208001 &&
    conn != 0xf4204011 && conn != 0xf4208011 &&
    conn != 0xf4248011) {
DRM_ERROR(
"Unknown DCB 1.4 / 1.5 entry, please report\n");
/* cause output setting to fail, so message is seen */
dev_priv->dcb_table.entries = 0;
return false;
}
/* most of the below is a "best guess" atm */
entry->type = conn & 0xf;
if (entry->type == 4) { /* digital */
if (conn & 0x10)
entry->type = DCB_OUTPUT_LVDS;
else
entry->type = DCB_OUTPUT_TMDS;
}
/* what's in bits 5-13? could be some brooktree/chrontel/philips thing, in tv case */
entry->i2c_index = (conn >> 14) & 0xf;
/* raw heads field is in range 0-1, so move to 1-2 */
entry->heads = ((conn >> 18) & 0x7) + 1;
entry->location = (conn >> 21) & 0xf;
entry->bus = (conn >> 25) & 0x7;
/* set or to be same as heads -- hopefully safe enough */
entry->or = entry->heads;
switch (entry->type) {
case DCB_OUTPUT_LVDS:
/* this is probably buried in conn's unknown bits */
entry->lvdsconf.use_power_scripts = true;
break;
case DCB_OUTPUT_TMDS:
/* invent a DVI-A output, by copying the fields of the DVI-D output
* reported to work by math_b on an NV20(!) */
memcpy(&entry[1], &entry[0], sizeof(struct dcb_entry));
entry[1].type = DCB_OUTPUT_ANALOG;
dev_priv->dcb_table.entries++;
}
read_dcb_i2c_entry(dev, dcb_version, i2ctabptr, entry->i2c_index);
} else if (dcb_version >= 0x12) {
/* v1.2 tables normally have the same 5 entries, which are not
* specific to the card, so use the defaults for a crt */
/* DCB v1.2 does have an I2C table that read_dcb_i2c_table can handle, but cards
* exist (seen on nv11) where the pointer to the table points to the wrong
* place, so for now, we rely on the indices parsed in parse_bmp_structure
*/
entry->i2c_index = dev_priv->bios.legacy.i2c_indices.crt;
} else { /* pre DCB / v1.1 - use the safe defaults for a crt */
DRM_ERROR(
"No information in BIOS output table; assuming a CRT output exists\n");
entry->i2c_index = dev_priv->bios.legacy.i2c_indices.crt;
}
if (entry->type == DCB_OUTPUT_LVDS && dev_priv->bios.fp.strapping != 0xff)
entry->lvdsconf.use_straps_for_mode = true;
dev_priv->dcb_table.entries++;
return true;
}
static void merge_like_dcb_entries(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
/* DCB v2.0 lists each output combination separately.
* Here we merge compatible entries to have fewer outputs, with more options
*/
int i, newentries = 0;
for (i = 0; i < dev_priv->dcb_table.entries; i++) {
struct dcb_entry *ient = &dev_priv->dcb_table.entry[i];
int j;
for (j = i + 1; j < dev_priv->dcb_table.entries; j++) {
struct dcb_entry *jent = &dev_priv->dcb_table.entry[j];
if (jent->type == 100) /* already merged entry */
continue;
/* merge heads field when all other fields the same */
if (jent->i2c_index == ient->i2c_index && jent->type == ient->type &&
    jent->location == ient->location && jent->or == ient->or) {
DRM_INFO(
"Merging DCB entries %d and %d\n", i, j);
ient->heads |= jent->heads;
jent->type = 100; /* dummy value */
}
}
}
/* Compact entries merged into others out of dcb_table */
for (i = 0; i < dev_priv->dcb_table.entries; i++) {
if (dev_priv->dcb_table.entry[i].type == 100)
continue;
if (newentries != i)
memcpy(&dev_priv->dcb_table.entry[newentries], &dev_priv->dcb_table.entry[i], sizeof(struct dcb_entry));
newentries++;
}
dev_priv->dcb_table.entries = newentries;
}
static unsigned int parse_dcb_table(struct drm_device *dev, struct bios *bios)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint16_t dcbptr, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t dcb_version, headerlen = 0x4, entries = MAX_NUM_DCB_ENTRIES;
bool configblock = true;
int recordlength = 8, confofs = 4;
int i;
dev_priv->dcb_table.entries = 0;
/* get the offset from 0x36 */
dcbptr = le16_to_cpu(*(uint16_t *)&bios->data[0x36]);
if (dcbptr == 0x0) {
DRM_ERROR(
"No Display Configuration Block pointer found\n");
/* this situation likely means a really old card, pre DCB, so we'll add the safe CRT entry */
parse_dcb_entry(dev, 0, 0, 0, 0, 0);
return 1;
}
dcbtable = &bios->data[dcbptr];
/* get DCB version */
dcb_version = dcbtable[0];
DRM_INFO(
"Display Configuration Block version %d.%d found\n",
dcb_version >> 4, dcb_version & 0xf);
if (dcb_version >= 0x20) { /* NV17+ */
uint32_t sig;
if (dcb_version >= 0x30) { /* NV40+ */
headerlen = dcbtable[1];
entries = dcbtable[2];
recordlength = dcbtable[3];
i2ctabptr = le16_to_cpu(*(uint16_t *)&dcbtable[4]);
sig = le32_to_cpu(*(uint32_t *)&dcbtable[6]);
DRM_INFO(
"DCB header length %d, with %d possible entries\n",
headerlen, entries);
} else {
i2ctabptr = le16_to_cpu(*(uint16_t *)&dcbtable[2]);
sig = le32_to_cpu(*(uint32_t *)&dcbtable[4]);
headerlen = 8;
}
if (sig != 0x4edcbdcb) {
DRM_ERROR(
"Bad Display Configuration Block signature (%08X)\n", sig);
return 0;
}
} else if (dcb_version >= 0x14) { /* some NV15/16, and NV11+ */
char sig[8];
memset(sig, 0, 8);
strncpy(sig, (char *)&dcbtable[-7], 7);
i2ctabptr = le16_to_cpu(*(uint16_t *)&dcbtable[2]);
recordlength = 10;
confofs = 6;
if (strcmp(sig, "DEV_REC")) {
DRM_ERROR(
"Bad Display Configuration Block signature (%s)\n", sig);
return 0;
}
} else if (dcb_version >= 0x12) { /* some NV6/10, and NV15+ */
i2ctabptr = le16_to_cpu(*(uint16_t *)&dcbtable[2]);
configblock = false;
} else { /* NV5+, maybe NV4 */
/* DCB 1.1 seems to be quite unhelpful - we'll just add the safe CRT entry */
parse_dcb_entry(dev, 0, dcb_version, 0, 0, 0);
return 1;
}
if (entries >= MAX_NUM_DCB_ENTRIES)
entries = MAX_NUM_DCB_ENTRIES;
for (i = 0; i < entries; i++) {
uint32_t connection, config = 0;
connection = le32_to_cpu(*(uint32_t *)&dcbtable[headerlen + recordlength * i]);
if (configblock)
config = le32_to_cpu(*(uint32_t *)&dcbtable[headerlen + confofs + recordlength * i]);
/* Should we allow discontinuous DCBs? Certainly DCB I2C tables can be discontinuous */
if ((connection & 0x0000000f) == 0x0000000f) /* end of records */
break;
if (connection == 0x00000000) /* seen on an NV11 with DCB v1.5 */
break;
DRM_INFO("Raw DCB entry %d: %08x %08x\n", i, connection, config);
if (!parse_dcb_entry(dev, dev_priv->dcb_table.entries, dcb_version, i2ctabptr, connection, config))
break;
}
merge_like_dcb_entries(dev);
return dev_priv->dcb_table.entries;
}
int nouveau_parse_bios(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
const uint8_t bit_signature[] = { 'B', 'I', 'T' };
int offset;
dev_priv->bios.data = kzalloc(NV50_PROM__ESIZE, GFP_KERNEL);
if (!dev_priv->bios.data)
return -ENOMEM;
if (!nv_shadow_bios(dev, dev_priv->bios.data))
return -EINVAL;
dev_priv->bios.length = dev_priv->bios.data[2] * 512;
if (dev_priv->bios.length > NV50_PROM__ESIZE)
dev_priv->bios.length = NV50_PROM__ESIZE;
if ((offset = findstr(dev_priv->bios.data, dev_priv->bios.length, bit_signature, sizeof(bit_signature)))) {
DRM_INFO("BIT BIOS found\n");
parse_bit_structure(dev, &dev_priv->bios, offset + 4);
} else {
DRM_ERROR("BIT BIOS not found\n");
return -EINVAL;
}
if (parse_dcb_table(dev, &dev_priv->bios))
DRM_INFO("Found %d entries in DCB\n", dev_priv->dcb_table.entries);
return 0;
}
/* temporary */
#define NV_RAMDAC_NVPLL 0x00680500
#define NV_RAMDAC_MPLL 0x00680504
#define NV_RAMDAC_VPLL 0x00680508
# define NV_RAMDAC_PLL_COEFF_MDIV 0x000000FF
# define NV_RAMDAC_PLL_COEFF_NDIV 0x0000FF00
# define NV_RAMDAC_PLL_COEFF_PDIV 0x00070000
# define NV30_RAMDAC_ENABLE_VCO2 (1 << 7)
#define NV_RAMDAC_VPLL2 0x00680520
bool get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
{
/* PLL limits table
*
* Version 0x10: NV31
* One byte header (version), one record of 24 bytes
* Version 0x11: NV36 - Not implemented
* Seems to have same record style as 0x10, but 3 records rather than 1
* Version 0x20: Found on Geforce 6 cards
* Trivial 4 byte BIT header. 31 (0x1f) byte record length
* Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
* 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
* length in general, some (integrated) have an extra configuration byte
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct bios *bios = &dev_priv->bios;
uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
int pllindex = 0;
uint32_t crystal_strap_mask, crystal_straps;
if (!bios->pll_limit_tbl_ptr) {
if (bios->chip_version >= 0x40 || bios->chip_version == 0x31 || bios->chip_version == 0x36) {
DRM_ERROR("Pointer to PLL limits table invalid\n");
return false;
}
} else {
pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
DRM_INFO("Found PLL limits table version 0x%X\n", pll_lim_ver);
}
crystal_strap_mask = 1 << 6;
/* open coded pNv->twoHeads test */
if (bios->chip_version > 0x10 && bios->chip_version != 0x15 &&
bios->chip_version != 0x1a && bios->chip_version != 0x20)
crystal_strap_mask |= 1 << 22;
crystal_straps = NV_READ(NV50_PEXTDEV + 0x0) & crystal_strap_mask;
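/* The straps select the reference crystal: neither bit set -> 13.5MHz,
 * bit 6 -> 14.318MHz, bit 22 -> 27MHz, both -> 25MHz (see the refclk
 * fallback at the end of this function).
 */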
switch (pll_lim_ver) {
/* we use version 0 to indicate a pre limit table bios (single stage pll)
* and load the hard coded limits instead */
case 0:
break;
case 0x10:
case 0x11: /* strictly v0x11 has 3 entries, but the last two don't seem to get used */
headerlen = 1;
recordlen = 0x18;
entries = 1;
pllindex = 0;
break;
case 0x20:
case 0x21:
headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
entries = bios->data[bios->pll_limit_tbl_ptr + 3];
break;
default:
DRM_ERROR("PLL limits table revision not currently supported\n");
return false;
}
/* initialize all members to zero */
memset(pll_lim, 0, sizeof(struct pll_lims));
if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex;
pll_lim->vco1.minfreq = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs])));
pll_lim->vco1.maxfreq = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs + 4])));
pll_lim->vco2.minfreq = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs + 8])));
pll_lim->vco2.maxfreq = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs + 12])));
pll_lim->vco1.min_inputfreq = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs + 16])));
pll_lim->vco2.min_inputfreq = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs + 20])));
pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
/* these values taken from nv30/31/36 */
pll_lim->vco1.min_n = 0x1;
if (bios->chip_version == 0x36)
pll_lim->vco1.min_n = 0x5;
pll_lim->vco1.max_n = 0xff;
pll_lim->vco1.min_m = 0x1;
pll_lim->vco1.max_m = 0xd;
pll_lim->vco2.min_n = 0x4;
/* on nv30, 31, 36 (i.e. all cards with two stage PLLs with this
* table version (apart from nv35)), N2 is compared to
* maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
* save a comparison
*/
pll_lim->vco2.max_n = 0x28;
if (bios->chip_version == 0x30 || bios->chip_version == 0x35)
/* only 5 bits available for N2 on nv30/35 */
pll_lim->vco2.max_n = 0x1f;
pll_lim->vco2.min_m = 0x1;
pll_lim->vco2.max_m = 0x4;
} else if (pll_lim_ver) { /* ver 0x20, 0x21 */
uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
uint32_t reg = 0; /* default match */
int i;
/* first entry is default match, if nothing better. warn if reg field nonzero */
if (le32_to_cpu(*((uint32_t *)&bios->data[plloffs])))
DRM_ERROR("Default PLL limit entry has non-zero register field\n");
if (limit_match > MAX_PLL_TYPES)
/* we've been passed a reg as the match */
reg = limit_match;
else /* limit match is a pll type */
for (i = 1; i < entries && !reg; i++) {
uint32_t cmpreg = le32_to_cpu(*((uint32_t *)(&bios->data[plloffs + recordlen * i])));
if (limit_match == NVPLL && (cmpreg == NV_RAMDAC_NVPLL || cmpreg == 0x4000))
reg = cmpreg;
if (limit_match == MPLL && (cmpreg == NV_RAMDAC_MPLL || cmpreg == 0x4020))
reg = cmpreg;
if (limit_match == VPLL1 && (cmpreg == NV_RAMDAC_VPLL || cmpreg == 0x4010))
reg = cmpreg;
if (limit_match == VPLL2 && (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
reg = cmpreg;
}
for (i = 1; i < entries; i++)
if (le32_to_cpu(*((uint32_t *)&bios->data[plloffs + recordlen * i])) == reg) {
pllindex = i;
break;
}
plloffs += recordlen * pllindex;
DRM_INFO("Loading PLL limits for reg 0x%08x\n", pllindex ? reg : 0);
/* frequencies are stored in tables in MHz, kHz are more useful, so we convert */
/* What output frequencies can each VCO generate? */
pll_lim->vco1.minfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 4]))) * 1000;
pll_lim->vco1.maxfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 6]))) * 1000;
pll_lim->vco2.minfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 8]))) * 1000;
pll_lim->vco2.maxfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 10]))) * 1000;
/* What input frequencies do they accept (past the m-divider)? */
pll_lim->vco1.min_inputfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 12]))) * 1000;
pll_lim->vco2.min_inputfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 14]))) * 1000;
pll_lim->vco1.max_inputfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 16]))) * 1000;
pll_lim->vco2.max_inputfreq = le16_to_cpu(*((uint16_t *)(&bios->data[plloffs + 18]))) * 1000;
/* What values are accepted as multiplier and divider? */
pll_lim->vco1.min_n = bios->data[plloffs + 20];
pll_lim->vco1.max_n = bios->data[plloffs + 21];
pll_lim->vco1.min_m = bios->data[plloffs + 22];
pll_lim->vco1.max_m = bios->data[plloffs + 23];
pll_lim->vco2.min_n = bios->data[plloffs + 24];
pll_lim->vco2.max_n = bios->data[plloffs + 25];
pll_lim->vco2.min_m = bios->data[plloffs + 26];
pll_lim->vco2.max_m = bios->data[plloffs + 27];
pll_lim->unk1c = bios->data[plloffs + 28];
pll_lim->max_log2p_bias = bios->data[plloffs + 29];
pll_lim->log2p_bias = bios->data[plloffs + 30];
if (recordlen > 0x22)
pll_lim->refclk = le32_to_cpu(*((uint32_t *)&bios->data[plloffs + 31]));
if (recordlen > 0x23)
if (bios->data[plloffs + 35])
DRM_ERROR("Bits set in PLL configuration byte (%x)\n", bios->data[plloffs + 35]);
/* C51 special not seen elsewhere */
/*if (bios->chip_version == 0x51 && !pll_lim->refclk) {
uint32_t sel_clk = nv32_rd(pScrn, NV_RAMDAC_SEL_CLK);
if (((limit_match == NV_RAMDAC_VPLL || limit_match == VPLL1) && sel_clk & 0x20) ||
((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
if (nv_idx_port_rd(pScrn, CRTC_INDEX_COLOR, NV_VGA_CRTCX_REVISION) < 0xa3)
pll_lim->refclk = 200000;
else
pll_lim->refclk = 25000;
}
}*/
}
/* By now any valid limit table ought to have set a max frequency for
* vco1, so if it's zero it's either a pre limit table bios, or one
* with an empty limit table (seen on nv18)
*/
if (!pll_lim->vco1.maxfreq) {
pll_lim->vco1.minfreq = bios->fminvco;
pll_lim->vco1.maxfreq = bios->fmaxvco;
pll_lim->vco1.min_inputfreq = 0;
pll_lim->vco1.max_inputfreq = INT_MAX;
pll_lim->vco1.min_n = 0x1;
pll_lim->vco1.max_n = 0xff;
pll_lim->vco1.min_m = 0x1;
if (crystal_straps == 0) {
/* nv05 does this, nv11 doesn't, nv10 unknown */
if (bios->chip_version < 0x11)
pll_lim->vco1.min_m = 0x7;
pll_lim->vco1.max_m = 0xd;
} else {
if (bios->chip_version < 0x11)
pll_lim->vco1.min_m = 0x8;
pll_lim->vco1.max_m = 0xe;
}
}
if (!pll_lim->refclk)
switch (crystal_straps) {
case 0:
pll_lim->refclk = 13500;
break;
case (1 << 6):
pll_lim->refclk = 14318;
break;
case (1 << 22):
pll_lim->refclk = 27000;
break;
case (1 << 22 | 1 << 6):
pll_lim->refclk = 25000;
break;
}
#if 1 /* for easy debugging */
DRM_INFO("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
DRM_INFO("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
DRM_INFO("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
DRM_INFO("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
DRM_INFO("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
DRM_INFO("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
DRM_INFO("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
DRM_INFO("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
DRM_INFO("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
DRM_INFO("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
DRM_INFO("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
DRM_INFO("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
DRM_INFO("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
DRM_INFO("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
DRM_INFO("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
DRM_INFO("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
DRM_INFO("pll.unk1c: %d\n", pll_lim->unk1c);
DRM_INFO("pll.max_log2p_bias: %d\n", pll_lim->max_log2p_bias);
DRM_INFO("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
DRM_INFO("pll.refclk: %d\n", pll_lim->refclk);
#endif
return true;
}

151 linux-core/nouveau_bios.h Normal file
View File

@@ -0,0 +1,151 @@
/*
* Copyright (C) 2005-2006 Erik Waling
* Copyright (C) 2006 Stephane Marchesin
* Copyright (C) 2007-2008 Stuart Bennett
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __NOUVEAU_BIOS_H__
#define __NOUVEAU_BIOS_H__
#include "drmP.h"
#include "drm.h"
#define LOC_ON_CHIP 0
enum dcb_output_type {/* matches DCB types */
DCB_OUTPUT_NONE = 4,
DCB_OUTPUT_ANALOG = 0,
DCB_OUTPUT_TMDS = 2,
DCB_OUTPUT_LVDS = 3,
DCB_OUTPUT_TV = 1,
};
struct bios {
uint8_t *data;
unsigned int length;
bool execute;
uint8_t major_version, chip_version;
uint8_t feature_byte;
uint32_t fmaxvco, fminvco;
uint32_t dactestval;
uint16_t init_script_tbls_ptr;
uint16_t extra_init_script_tbl_ptr;
uint16_t macro_index_tbl_ptr;
uint16_t macro_tbl_ptr;
uint16_t condition_tbl_ptr;
uint16_t io_condition_tbl_ptr;
uint16_t io_flag_condition_tbl_ptr;
uint16_t init_function_tbl_ptr;
uint16_t pll_limit_tbl_ptr;
uint16_t ram_restrict_tbl_ptr;
struct {
struct nouveau_hw_mode *native_mode;
uint8_t *edid;
uint16_t lvdsmanufacturerpointer;
uint16_t xlated_entry;
bool power_off_for_reset;
bool reset_after_pclk_change;
bool dual_link;
bool link_c_increment;
bool if_is_24bit;
bool BITbit1;
int duallink_transition_clk;
/* lower nibble stores PEXTDEV_BOOT_0 strap
* upper nibble stores xlated display strap */
uint8_t strapping;
} fp;
struct {
uint16_t output0_script_ptr;
uint16_t output1_script_ptr;
} tmds;
struct {
uint16_t mem_init_tbl_ptr;
uint16_t sdr_seq_tbl_ptr;
uint16_t ddr_seq_tbl_ptr;
struct {
uint8_t crt, tv, panel;
} i2c_indices;
} legacy;
};
struct dcb_entry {
int index;
uint8_t type;
uint8_t i2c_index;
uint8_t heads;
uint8_t bus;
uint8_t location;
uint8_t or;
bool duallink_possible;
union {
struct {
bool use_straps_for_mode;
bool use_power_scripts;
} lvdsconf;
};
};
/* changing these requires matching changes to reg tables in nv_get_clock */
#define MAX_PLL_TYPES 4
enum pll_types {
NVPLL,
MPLL,
VPLL1,
VPLL2
};
struct pll_lims {
struct {
int minfreq;
int maxfreq;
int min_inputfreq;
int max_inputfreq;
uint8_t min_m;
uint8_t max_m;
uint8_t min_n;
uint8_t max_n;
} vco1, vco2;
uint8_t unk1c;
uint8_t max_log2p_bias;
uint8_t log2p_bias;
int refclk;
};
bool get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim);
int nouveau_parse_bios(struct drm_device *dev);
#endif /* __NOUVEAU_BIOS_H__ */

Some files were not shown because too many files have changed in this diff