 static int amdgpu_identity_map(struct amdgpu_device *adev,
 			       struct amdgpu_queue_mapper *mapper,
-			       int ring,
+			       u32 ring,
 			       struct amdgpu_ring **out_ring)
 {
 	switch (mapper->hw_ip) {
 static int amdgpu_lru_map(struct amdgpu_device *adev,
 			  struct amdgpu_queue_mapper *mapper,
-			  int user_ring, bool lru_pipe_order,
+			  u32 user_ring, bool lru_pipe_order,
 			  struct amdgpu_ring **out_ring)
 {
 	int r, i, j;
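Both mappers take their indices straight from the command-submission UAPI, where they are declared unsigned; funnelling them through a signed int only invites sign-conversion surprises, so u32 simply mirrors the ABI. A rough sketch of the chunk that carries these values (abridged from include/uapi/drm/amdgpu_drm.h; only the fields relevant here are shown):

struct drm_amdgpu_cs_chunk_ib {
	/* ... flags, GPU VA and size of the IB omitted ... */
	__u32 ip_type;		/* which engine: GFX, COMPUTE, VCN_ENC, ... */
	__u32 ip_instance;	/* instance of that engine, currently always 0 */
	__u32 ring;		/* user-visible ring index on that engine */
};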
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 struct amdgpu_queue_mgr *mgr,
-			 int hw_ip, int instance, int ring,
+			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring)
 {
 	int r, ip_num_rings;
 	/* Right now all IPs have only one instance - multiple rings. */
 	if (instance != 0) {
-		DRM_ERROR("invalid ip instance: %d\n", instance);
+		DRM_DEBUG("invalid ip instance: %d\n", instance);
 		return -EINVAL;
 	}
 		ip_num_rings = adev->vcn.num_enc_rings;
 		break;
 	default:
-		DRM_ERROR("unknown ip type: %d\n", hw_ip);
+		DRM_DEBUG("unknown ip type: %d\n", hw_ip);
 		return -EINVAL;
 	}
 	if (ring >= ip_num_rings) {
-		DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
-			  ring, ip_num_rings, hw_ip);
+		DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
+			  ring, ip_num_rings, hw_ip);
 		return -EINVAL;
 	}
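The unsigned type is what makes this single comparison a complete bounds check: a "negative" value from userspace becomes a huge u32 and is rejected here, whereas a signed int would compare less than ip_num_rings and reach the per-IP ring lookup as a negative subscript. A standalone illustration of the difference (hypothetical values, not driver code):

#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 3

int main(void)
{
	int bad = -1;                   /* index as it arrives if taken as a signed int */
	uint32_t bad_u = (uint32_t)bad; /* same bit pattern taken as u32: 0xffffffff */

	/* Signed comparison: -1 >= 3 is false, so a negative index would slip
	 * past the check and reach the ring array as a negative subscript. */
	printf("signed   check rejects it: %s\n", bad >= NUM_RINGS ? "yes" : "no");

	/* Unsigned comparison: 0xffffffff >= 3 is true, so the request is
	 * refused before any lookup happens. */
	printf("unsigned check rejects it: %s\n", bad_u >= NUM_RINGS ? "yes" : "no");
	return 0;
}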
 	default:
 		*out_ring = NULL;
 		r = -EINVAL;
-		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+		DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
 	}
 out_unlock:
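Every message touched above sits on an -EINVAL path that unprivileged userspace can reach directly through the CS ioctl, so at DRM_ERROR level a malformed submission is enough to flood dmesg; DRM_DEBUG keeps the diagnostics but only emits them when DRM debugging is enabled via the drm.debug module parameter. For context, roughly how the CS path feeds the user-supplied indices into the mapper (a sketch of the call shape, not a verbatim quote of amdgpu_cs.c):

	/* chunk_ib points at the userspace-provided struct drm_amdgpu_cs_chunk_ib */
	r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr,
				 chunk_ib->ip_type, chunk_ib->ip_instance,
				 chunk_ib->ring, &ring);
	if (r)
		return r;	/* submission is rejected; the message above is debug-only */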