drm/amdgpu: validate userq buffer virtual address and size
Validate the userq object's virtual address to determine whether it is
resident in a valid VM mapping.

Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 9e46b8bb05
parent 4ba48fc3da

3 changed files with 58 additions and 0 deletions
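For reference, the validation added below works in GPU-page units: the virtual address is masked against the GMC hole, shifted down to a page index, and then checked for containment in the inclusive [start, last] page range of the mapping found at that index. What follows is a minimal standalone sketch of that check, not the driver code: GMC_HOLE_MASK, GPU_PAGE_SHIFT, struct va_mapping, and va_validate() are simplified stand-ins, and the real helper looks the mapping up via amdgpu_vm_bo_lookup_mapping() under the VM reservation instead of taking one as a parameter.

/*
 * Minimal standalone sketch of the range check this patch adds.
 * All names and constants here are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define GMC_HOLE_MASK  0x0000ffffffffffffULL /* stand-in: keep low 48 VA bits */
#define GPU_PAGE_SHIFT 12                    /* stand-in: 4 KiB GPU pages */

struct va_mapping {     /* stand-in for struct amdgpu_bo_va_mapping */
        uint64_t start; /* first mapped page */
        uint64_t last;  /* last mapped page (inclusive) */
};

/* Return 0 iff [addr, addr + expected_size) lies inside the mapping. */
static int va_validate(const struct va_mapping *map, uint64_t addr,
                       uint64_t expected_size)
{
        /* Normalize: strip the GMC hole bits, then convert to page units. */
        uint64_t user_addr = (addr & GMC_HOLE_MASK) >> GPU_PAGE_SHIFT;
        uint64_t size = expected_size >> GPU_PAGE_SHIFT;

        /*
         * The queue must start inside the mapping and fit before its end;
         * in the real helper the lookup guarantees user_addr <= map->last.
         */
        if (user_addr >= map->start && map->last - user_addr + 1 >= size)
                return 0;
        return -22; /* -EINVAL */
}

int main(void)
{
        struct va_mapping map = { .start = 0x100, .last = 0x1ff }; /* 256 pages */

        /* Fits: 0x80 pages requested at page 0x180, 0x80 pages remain. */
        printf("%d\n", va_validate(&map, 0x180000, 0x80000));  /* prints 0 */
        /* Too big: 0x100 pages requested at page 0x180, only 0x80 remain. */
        printf("%d\n", va_validate(&map, 0x180000, 0x100000)); /* prints -22 */
        return 0;
}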
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -44,6 +44,38 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
 	return userq_ip_mask;
 }
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+				   u64 expected_size)
+{
+	struct amdgpu_bo_va_mapping *va_map;
+	u64 user_addr;
+	u64 size;
+	int r = 0;
+
+	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+	r = amdgpu_bo_reserve(vm->root.bo, false);
+	if (r)
+		return r;
+
+	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+	if (!va_map) {
+		r = -EINVAL;
+		goto out_err;
+	}
+	/* Only validate whether the userq is resident in the VM mapping range */
+	if (user_addr >= va_map->start &&
+	    va_map->last - user_addr + 1 >= size) {
+		amdgpu_bo_unreserve(vm->root.bo);
+		return 0;
+	}
+
+out_err:
+	amdgpu_bo_unreserve(vm->root.bo);
+	return r;
+}
+
 static int
 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
 			  struct amdgpu_usermode_queue *queue)
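Note on amdgpu_userq_input_va_validate() above: amdgpu_vm_bo_lookup_mapping() expects the VM to be reserved, hence the amdgpu_bo_reserve()/amdgpu_bo_unreserve() bracket around the lookup; both the success path and the out_err path drop the reservation before returning.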
@@ -428,6 +460,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 		r = -ENOMEM;
 		goto unlock;
 	}
+
+	/* Validate the userq virtual address. */
+	if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+	    amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+	    amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+		kfree(queue);
+		goto unlock;
+	}
 	queue->doorbell_handle = args->in.doorbell_handle;
 	queue->queue_type = args->in.ip_type;
 	queue->vm = &fpriv->vm;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -137,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						   u32 idx);
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+				   u64 expected_size);
 #endif
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -254,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 	struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
 	struct drm_amdgpu_userq_in *mqd_user = args_in;
 	struct amdgpu_mqd_prop *userq_props;
+	struct amdgpu_gfx_shadow_info shadow_info;
 	int r;
 
 	/* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -279,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 	userq_props->doorbell_index = queue->doorbell_index;
 	userq_props->fence_address = queue->fence_drv->gpu_addr;
 
+	if (adev->gfx.funcs->get_gfx_shadow_info)
+		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
 	if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
 		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
 
@@ -295,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 			goto free_mqd;
 		}
 
+		if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+		    max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+			goto free_mqd;
+
 		userq_props->eop_gpu_addr = compute_mqd->eop_va;
 		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -322,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		userq_props->csa_addr = mqd_gfx_v11->csa_va;
 		userq_props->tmz_queue =
 			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+		if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+		    shadow_info.shadow_size))
+			goto free_mqd;
+
 		kfree(mqd_gfx_v11);
 	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
 		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -339,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 			goto free_mqd;
 		}
 
+		if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+		    shadow_info.csa_size))
+			goto free_mqd;
+
 		userq_props->csa_addr = mqd_sdma_v11->csa_va;
 		kfree(mqd_sdma_v11);
 	}
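Taken together, these checks abort queue creation when the queue, rptr, or wptr VA (or, on the MES path, the EOP, shadow, or CSA VA) is not backed by a VM mapping large enough for the expected size, rather than letting an unmapped address reach the MQD.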