amdgpu: cleanup public interface style

Fix some style problems, adjust to a common indentation, reorder two
function definitions and remove stale comments.

No intended functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
main
Christian König 2015-06-30 16:04:44 +02:00 committed by Alex Deucher
parent 4a9f5f2e1e
commit 558e1294f2
1 changed file with 90 additions and 136 deletions

View File

@ -232,7 +232,6 @@ struct amdgpu_bo_import_result {
uint64_t virtual_mc_base_address; uint64_t virtual_mc_base_address;
}; };
/** /**
* *
* Structure to describe GDS partitioning information. * Structure to describe GDS partitioning information.
@ -270,8 +269,7 @@ struct amdgpu_cs_dep_info {
/** Ring index of the HW IP */ /** Ring index of the HW IP */
uint32_t ring; uint32_t ring;
/** Specify fence for which we need to check /** Specify fence for which we need to check submission status.*/
* submission status.*/
uint64_t fence; uint64_t fence;
}; };
@ -291,7 +289,6 @@ struct amdgpu_cs_ib_info {
/** /**
* Size of Command Buffer to be submitted. * Size of Command Buffer to be submitted.
* - The size is in units of dwords (4 bytes). * - The size is in units of dwords (4 bytes).
* - Must be less or equal to the size of allocated IB
* - Could be 0 * - Could be 0
*/ */
uint32_t size; uint32_t size;
@ -372,8 +369,7 @@ struct amdgpu_cs_query_fence {
/** Flags */ /** Flags */
uint64_t flags; uint64_t flags;
/** Specify fence for which we need to check /** Specify fence for which we need to check submission status.*/
* submission status.*/
uint64_t fence; uint64_t fence;
}; };
@ -394,7 +390,6 @@ struct amdgpu_buffer_size_alignments {
uint64_t size_remote; uint64_t size_remote;
}; };
/** /**
* Structure which provide information about heap * Structure which provide information about heap
* *
@ -420,8 +415,6 @@ struct amdgpu_heap_info {
uint64_t max_allocation; uint64_t max_allocation;
}; };
/** /**
* Describe GPU h/w info needed for UMD correct initialization * Describe GPU h/w info needed for UMD correct initialization
* *
@ -430,7 +423,7 @@ struct amdgpu_heap_info {
struct amdgpu_gpu_info { struct amdgpu_gpu_info {
/** Asic id */ /** Asic id */
uint32_t asic_id; uint32_t asic_id;
/**< Chip revision */ /** Chip revision */
uint32_t chip_rev; uint32_t chip_rev;
/** Chip external revision */ /** Chip external revision */
uint32_t chip_external_rev; uint32_t chip_external_rev;
@ -496,14 +489,14 @@ struct amdgpu_gpu_info {
* *
*/ */
/** /**
* *
* \param fd - \c [in] File descriptor for AMD GPU device * \param fd - \c [in] File descriptor for AMD GPU device
* received previously as the result of * received previously as the result of
* e.g. drmOpen() call. * e.g. drmOpen() call.
* For legacy fd type, the DRI2/DRI3 authentication * For legacy fd type, the DRI2/DRI3
* should be done before calling this function. * authentication should be done before
* calling this function.
* \param major_version - \c [out] Major version of library. It is assumed * \param major_version - \c [out] Major version of library. It is assumed
* that adding new functionality will cause * that adding new functionality will cause
* increase in major version * increase in major version
@ -525,8 +518,6 @@ int amdgpu_device_initialize(int fd,
uint32_t *minor_version, uint32_t *minor_version,
amdgpu_device_handle *device_handle); amdgpu_device_handle *device_handle);
/** /**
* *
* When access to such library does not needed any more the special * When access to such library does not needed any more the special
@ -547,7 +538,6 @@ int amdgpu_device_initialize(int fd,
*/ */
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle); int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
/* /*
* Memory Management * Memory Management
* *
@ -651,6 +641,42 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
uint32_t shared_handle, uint32_t shared_handle,
struct amdgpu_bo_import_result *output); struct amdgpu_bo_import_result *output);
/**
* Request GPU access to user allocated memory e.g. via "malloc"
*
* \param dev - [in] Device handle. See #amdgpu_device_initialize()
* \param cpu - [in] CPU address of user allocated memory which we
* want to map to GPU address space (make GPU accessible)
* (This address must be correctly aligned).
* \param size - [in] Size of allocation (must be correctly aligned)
* \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as
* resource on submission and be used in other operations.
*
*
* \return 0 on success
* >0 - AMD specific error code
* <0 - Negative POSIX Error code
*
* \note
* This call doesn't guarantee that such memory will be persistently
* "locked" / make non-pageable. The purpose of this call is to provide
* opportunity for GPU get access to this resource during submission.
*
* The maximum amount of memory which could be mapped in this call depends
* if overcommit is disabled or not. If overcommit is disabled than the max.
* amount of memory to be pinned will be limited by left "free" size in total
* amount of memory which could be locked simultaneously ("GART" size).
*
* Supported (theoretical) max. size of mapping is restricted only by
* "GART" size.
*
* It is responsibility of caller to correctly specify access rights
* on VA assignment.
*/
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu, uint64_t size,
struct amdgpu_bo_alloc_result *info);
/** /**
* Free previosuly allocated memory * Free previosuly allocated memory
* *
@ -701,7 +727,6 @@ int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);
*/ */
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle); int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
/** /**
* Wait until a buffer is not used by the device. * Wait until a buffer is not used by the device.
* *
@ -713,7 +738,8 @@ int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
* 1 GPU access is in fly or scheduled * 1 GPU access is in fly or scheduled
* *
* \return 0 - on success * \return 0 - on success
* <0 - AMD specific error code * >0 - AMD specific error code
* <0 - Negative POSIX Error code
*/ */
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle, int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
uint64_t timeout_ns, uint64_t timeout_ns,
@ -773,28 +799,6 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
amdgpu_bo_handle *resources, amdgpu_bo_handle *resources,
uint8_t *resource_prios); uint8_t *resource_prios);
/*
* Special GPU Resources
*
*/
/**
* Query information about GDS
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param gds_info - \c [out] Pointer to structure to get GDS information
*
* \return 0 on success\n
* >0 - AMD specific error code\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_gds_info(amdgpu_device_handle dev,
struct amdgpu_gds_resource_info *gds_info);
/* /*
* GPU Execution context * GPU Execution context
* *
@ -855,7 +859,6 @@ int amdgpu_cs_ctx_free(amdgpu_context_handle context);
int amdgpu_cs_query_reset_state(amdgpu_context_handle context, int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs); uint32_t *state, uint32_t *hangs);
/* /*
* Command Buffers Management * Command Buffers Management
* *
@ -929,13 +932,11 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence, int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence,
uint32_t *expired); uint32_t *expired);
/* /*
* Query / Info API * Query / Info API
* *
*/ */
/** /**
* Query allocation size alignments * Query allocation size alignments
* *
@ -956,8 +957,6 @@ int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
struct amdgpu_buffer_size_alignments struct amdgpu_buffer_size_alignments
*info); *info);
/** /**
* Query firmware versions * Query firmware versions
* *
@ -977,8 +976,6 @@ int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
unsigned ip_instance, unsigned index, unsigned ip_instance, unsigned index,
uint32_t *version, uint32_t *feature); uint32_t *version, uint32_t *feature);
/** /**
* Query the number of HW IP instances of a certain type. * Query the number of HW IP instances of a certain type.
* *
@ -993,8 +990,6 @@ int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type, int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
uint32_t *count); uint32_t *count);
/** /**
* Query engine information * Query engine information
* *
@ -1014,9 +1009,6 @@ int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
unsigned ip_instance, unsigned ip_instance,
struct drm_amdgpu_info_hw_ip *info); struct drm_amdgpu_info_hw_ip *info);
/** /**
* Query heap information * Query heap information
* *
@ -1032,12 +1024,8 @@ int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
* <0 - Negative POSIX Error code * <0 - Negative POSIX Error code
* *
*/ */
int amdgpu_query_heap_info(amdgpu_device_handle dev, int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
uint32_t heap, uint32_t flags, struct amdgpu_heap_info *info);
uint32_t flags,
struct amdgpu_heap_info *info);
/** /**
* Get the CRTC ID from the mode object ID * Get the CRTC ID from the mode object ID
@ -1054,8 +1042,6 @@ int amdgpu_query_heap_info(amdgpu_device_handle dev,
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id, int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
int32_t *result); int32_t *result);
/** /**
* Query GPU H/w Info * Query GPU H/w Info
* *
@ -1073,8 +1059,6 @@ int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
int amdgpu_query_gpu_info(amdgpu_device_handle dev, int amdgpu_query_gpu_info(amdgpu_device_handle dev,
struct amdgpu_gpu_info *info); struct amdgpu_gpu_info *info);
/** /**
* Query hardware or driver information. * Query hardware or driver information.
* *
@ -1094,7 +1078,19 @@ int amdgpu_query_gpu_info(amdgpu_device_handle dev,
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id, int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
unsigned size, void *value); unsigned size, void *value);
/**
* Query information about GDS
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param gds_info - \c [out] Pointer to structure to get GDS information
*
* \return 0 on success\n
* >0 - AMD specific error code\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_query_gds_info(amdgpu_device_handle dev,
struct amdgpu_gds_resource_info *gds_info);
/** /**
* Read a set of consecutive memory-mapped registers. * Read a set of consecutive memory-mapped registers.
@ -1118,46 +1114,4 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
unsigned count, uint32_t instance, uint32_t flags, unsigned count, uint32_t instance, uint32_t flags,
uint32_t *values); uint32_t *values);
/**
* Request GPU access to user allocated memory e.g. via "malloc"
*
* \param dev - [in] Device handle. See #amdgpu_device_initialize()
* \param cpu - [in] CPU address of user allocated memory which we
* want to map to GPU address space (make GPU accessible)
* (This address must be correctly aligned).
* \param size - [in] Size of allocation (must be correctly aligned)
* \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as resource
* on submission and be used in other operations.(e.g. for VA submission)
* ( Temporally defined amdgpu_bo_alloc_result as parameter for return mc address. )
*
*
* \return 0 on success
* >0 - AMD specific error code
* <0 - Negative POSIX Error code
*
*
* \note
* This call doesn't guarantee that such memory will be persistently
* "locked" / make non-pageable. The purpose of this call is to provide
* opportunity for GPU get access to this resource during submission.
*
* The maximum amount of memory which could be mapped in this call depends
* if overcommit is disabled or not. If overcommit is disabled than the max.
* amount of memory to be pinned will be limited by left "free" size in total
* amount of memory which could be locked simultaneously ("GART" size).
*
* Supported (theoretical) max. size of mapping is restricted only by
* "GART" size.
*
* It is responsibility of caller to correctly specify access rights
* on VA assignment.
*/
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
struct amdgpu_bo_alloc_result *info);
#endif /* #ifdef _AMDGPU_H_ */ #endif /* #ifdef _AMDGPU_H_ */