intel: annotate public functions

This was done with:
while read sym; do
	# cscope -L prints "<file> <scope> <line> <text>" for the symbol's definition
	read f func line _ <<<$(cscope -d -L -1 $sym)
	if [ ! -z "$f" ]; then
		# the return type sits on the line above the function name;
		# prepend the annotation to that line
		line=$((line-1))
		sed -i "${line}s/^/drm_public /" $f
	fi
done < /tmp/a.txt

Then a few corner cases were fixed manually. The "a.txt" file above contains
the symbols collected from intel/intel-symbol-check. The idea is to eventually
switch the default visibility to hidden so that we don't export symbols we
shouldn't.
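
To make the goal concrete, here is a hedged sketch of the check this enables
(the library path and temporary file names are illustrative, not part of this
patch): once the default visibility is flipped to hidden, the dynamic symbol
table of the built library should match the expected list, and anything extra
is a symbol we are leaking.

# Illustrative only: compare the symbols actually exported by the built
# library against the expected list (the same one fed to the loop above).
nm -D --defined-only path/to/libdrm_intel.so | awk '{ print $3 }' | sort > exported.txt
sort /tmp/a.txt > expected.txt
diff -u expected.txt exported.txt   # '+' lines are symbols we should not export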

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Eric Engestrom <eric.engestrom@intel.com>
Lucas De Marchi, 2018-09-12 13:09:08 -07:00
commit 36bb0ea47b (parent a9463bdb01)
5 changed files with 82 additions and 81 deletions


@ -45,21 +45,21 @@
* Convenience functions for buffer management methods.
*/
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
const char *name, void *addr,
uint32_t tiling_mode,
@ -73,7 +73,7 @@ drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
return NULL;
}
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
@ -82,13 +82,13 @@ drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
tiling_mode, pitch, flags);
}
void
drm_public void
drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
void
drm_public void
drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
@ -97,26 +97,26 @@ drm_intel_bo_unreference(drm_intel_bo *bo)
bo->bufmgr->bo_unreference(bo);
}
int
drm_public int
drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
int
drm_public int
drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
int
drm_public int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
int
drm_public int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
@ -135,26 +135,26 @@ drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
return 0;
}
void
drm_public void
drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
void
drm_public void
drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
int
drm_public int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
int
drm_public int
drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int rings)
@ -174,19 +174,19 @@ drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
}
}
void
drm_public void
drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
int
drm_public int
drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
int
drm_public int
drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
@ -195,7 +195,7 @@ drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
return -ENODEV;
}
int
drm_public int
drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
@ -206,7 +206,7 @@ drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
}
/* For fence registers, not GL fences */
int
drm_public int
drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
@ -217,7 +217,7 @@ drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
}
int
drm_public int
drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
@ -226,7 +226,7 @@ drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
return -ENODEV;
}
int
drm_public int
drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
@ -235,7 +235,7 @@ drm_intel_bo_unpin(drm_intel_bo *bo)
return -ENODEV;
}
int
drm_public int
drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
@ -246,7 +246,7 @@ drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
return 0;
}
int
drm_public int
drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
@ -258,7 +258,7 @@ drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
return 0;
}
int
drm_public int
drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
{
if (bo->bufmgr->bo_set_softpin_offset)
@ -267,7 +267,7 @@ drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
return -ENODEV;
}
int
drm_public int
drm_intel_bo_disable_reuse(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
@ -275,7 +275,7 @@ drm_intel_bo_disable_reuse(drm_intel_bo *bo)
return 0;
}
int
drm_public int
drm_intel_bo_is_reusable(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_is_reusable)
@ -283,7 +283,7 @@ drm_intel_bo_is_reusable(drm_intel_bo *bo)
return 0;
}
int
drm_public int
drm_intel_bo_busy(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_busy)
@ -291,7 +291,7 @@ drm_intel_bo_busy(drm_intel_bo *bo)
return 0;
}
int
drm_public int
drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
{
if (bo->bufmgr->bo_madvise)
@ -299,7 +299,7 @@ drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
return -1;
}
int
drm_public int
drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
if (bo->bufmgr->bo_use_48b_address_range) {
@ -310,13 +310,13 @@ drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
return -ENODEV;
}
int
drm_public int
drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}
int
drm_public int
drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
if (bufmgr->get_pipe_from_crtc_id)
@ -360,7 +360,7 @@ drm_intel_probe_agp_aperture_size(int fd)
}
#endif
int
drm_public int
drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total)
{


@ -764,7 +764,7 @@ drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
* -- just evict everything
* -- and wait for idle
*/
void
drm_public void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
@ -860,7 +860,7 @@ drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
4096);
}
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset,
@ -1532,7 +1532,7 @@ drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
* Used by the X Server on LeaveVT, when the card memory is no longer our
* own.
*/
void
drm_public void
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
@ -1567,7 +1567,7 @@ drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
pthread_mutex_unlock(&bufmgr_fake->lock);
}
void
drm_public void
drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int
*last_dispatch)
@ -1577,7 +1577,7 @@ drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
drm_intel_bufmgr *
drm_public drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
void *low_virtual, unsigned long size,
volatile unsigned int *last_dispatch)


@ -1075,7 +1075,7 @@ check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
* This can be used when one application needs to pass a buffer object
* to another.
*/
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
@ -1562,7 +1562,7 @@ map_gtt(drm_intel_bo *bo)
return 0;
}
int
drm_public int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@ -1621,7 +1621,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
* undefined).
*/
int
drm_public int
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@ -1710,7 +1710,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
return ret;
}
int
drm_public int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
@ -1835,7 +1835,7 @@ drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
* Note that some kernels have broken the infinite wait for negative values
* promise, upgrade to latest stable kernels if this is the case.
*/
int
drm_public int
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@ -1871,7 +1871,7 @@ drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
void
drm_public void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@ -2098,7 +2098,7 @@ drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
read_domains, write_domain, true);
}
int
drm_public int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@ -2121,7 +2121,7 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
*
* This also removes all softpinned targets being referenced by the BO.
*/
void
drm_public void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@ -2277,7 +2277,7 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
}
}
void
drm_public void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
@ -2479,14 +2479,14 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
-1, NULL, flags);
}
int
drm_public int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags)
{
return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
}
int
drm_public int
drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
drm_intel_context *ctx,
int used,
@ -2627,7 +2627,7 @@ drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
return 0;
}
drm_intel_bo *
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@ -2710,7 +2710,7 @@ err:
return NULL;
}
int
drm_public int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@ -2762,7 +2762,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
void
drm_public void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@ -2784,7 +2784,7 @@ drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
* which can be checked using drm_intel_bufmgr_can_disable_implicit_sync,
* or subsequent execbufs involving the bo will generate EINVAL.
*/
void
drm_public void
drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@ -2803,7 +2803,7 @@ drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
* function can be used to restore the implicit sync before subsequent
* rendering.
*/
void
drm_public void
drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@ -2815,7 +2815,7 @@ drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
* Query whether the kernel supports disabling of its implicit synchronisation
* before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
*/
int
drm_public int
drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@ -2830,7 +2830,7 @@ drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
* allocation. If this option is not enabled, all relocs will have fence
* register allocated.
*/
void
drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@ -3109,7 +3109,7 @@ init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
}
}
void
drm_public void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@ -3178,7 +3178,7 @@ get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
return devid;
}
int
drm_public int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@ -3192,7 +3192,7 @@ drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
* This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
* for it to have any effect.
*/
void
drm_public void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename)
{
@ -3206,7 +3206,7 @@ drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
* You can set up a GTT and upload your objects into the referenced
* space, then send off batchbuffers and get BMPs out the other end.
*/
void
drm_public void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
@ -3216,7 +3216,7 @@ drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
"See the intel_aubdump man page for more details.\n");
}
drm_intel_context *
drm_public drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@ -3243,7 +3243,7 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
return context;
}
int
drm_public int
drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
{
if (ctx == NULL)
@ -3254,7 +3254,7 @@ drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
return 0;
}
void
drm_public void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
drm_intel_bufmgr_gem *bufmgr_gem;
@ -3277,7 +3277,7 @@ drm_intel_gem_context_destroy(drm_intel_context *ctx)
free(ctx);
}
int
drm_public int
drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
@ -3311,7 +3311,7 @@ drm_intel_get_reset_stats(drm_intel_context *ctx,
return ret;
}
int
drm_public int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
@ -3329,7 +3329,7 @@ drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
return ret;
}
int
drm_public int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
drm_i915_getparam_t gp;
@ -3345,7 +3345,7 @@ drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
return 0;
}
int
drm_public int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
drm_i915_getparam_t gp;
@ -3361,7 +3361,7 @@ drm_intel_get_eu_total(int fd, unsigned int *eu_total)
return 0;
}
int
drm_public int
drm_intel_get_pooled_eu(int fd)
{
drm_i915_getparam_t gp;
@ -3376,7 +3376,7 @@ drm_intel_get_pooled_eu(int fd)
return ret;
}
int
drm_public int
drm_intel_get_min_eu_in_pool(int fd)
{
drm_i915_getparam_t gp;
@ -3412,8 +3412,7 @@ drm_intel_get_min_eu_in_pool(int fd)
* default state (no annotations), call this function with a \c count
* of zero.
*/
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_public void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count)
{
@ -3454,7 +3453,7 @@ drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
}
}
void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
drm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@ -3502,7 +3501,7 @@ void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
return bo_gem->gtt_virtual;
}
void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
drm_public void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@ -3546,7 +3545,7 @@ void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
return bo_gem->mem_virtual;
}
void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
drm_public void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@ -3595,7 +3594,7 @@ void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
*
* \param fd File descriptor of the opened DRM device.
*/
drm_intel_bufmgr *
drm_public drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;


@ -3811,7 +3811,7 @@ decode_3d_i830(struct drm_intel_decode *ctx)
return 1;
}
struct drm_intel_decode *
drm_public struct drm_intel_decode *
drm_intel_decode_context_alloc(uint32_t devid)
{
struct drm_intel_decode *ctx;
@ -3845,20 +3845,20 @@ drm_intel_decode_context_alloc(uint32_t devid)
return ctx;
}
void
drm_public void
drm_intel_decode_context_free(struct drm_intel_decode *ctx)
{
free(ctx);
}
void
drm_public void
drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
int dump_past_end)
{
ctx->dump_past_end = !!dump_past_end;
}
void
drm_public void
drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
void *data, uint32_t hw_offset, int count)
{
@ -3867,7 +3867,7 @@ drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
ctx->base_count = count;
}
void
drm_public void
drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
uint32_t head, uint32_t tail)
{
@ -3875,7 +3875,7 @@ drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
ctx->tail = tail;
}
void
drm_public void
drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
FILE *output)
{
@ -3889,7 +3889,7 @@ drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
* \param count number of DWORDs to decode in the batch buffer
* \param hw_offset hardware address for the buffer
*/
void
drm_public void
drm_intel_decode(struct drm_intel_decode *ctx)
{
int ret;


@ -25,8 +25,10 @@
#if HAVE_VISIBILITY
# define drm_private __attribute__((visibility("hidden")))
# define drm_public __attribute__((visibility("default")))
#else
# define drm_private
# define drm_public
#endif
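
For context on how these macros are meant to pay off, a minimal standalone
sketch (the flags and file names are illustrative; the actual switch of the
default visibility is left for a follow-up change): with the default flipped
to hidden, only drm_public-annotated functions remain in the library's dynamic
symbol table.

# Illustrative only: hidden-by-default build with a single annotated export.
cat > vis_demo.c <<'EOF'
#define drm_public __attribute__((visibility("default")))

drm_public int exported_fn(void) { return 0; }  /* stays exported */
int hidden_fn(void)              { return 1; }  /* hidden by -fvisibility=hidden */
EOF
gcc -shared -fPIC -fvisibility=hidden -o libvis_demo.so vis_demo.c
nm -D --defined-only libvis_demo.so   # lists exported_fn but not hidden_fn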