struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
};
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser,
- int error, bool backoff);
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff);
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser);
-
static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
{
unsigned i;
return 0;
}
-static void amdgpu_job_work_func(struct work_struct *work)
-{
- struct amdgpu_cs_parser *sched_job =
- container_of(work, struct amdgpu_cs_parser,
- job_work);
- mutex_lock(&sched_job->job_lock);
- if (sched_job->free_job)
- sched_job->free_job(sched_job);
- mutex_unlock(&sched_job->job_lock);
- /* after processing job, free memory */
- kfree(sched_job);
-}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct amdgpu_ctx *ctx,
parser->ctx = ctx;
parser->ibs = ibs;
parser->num_ibs = num_ibs;
- if (amdgpu_enable_scheduler) {
- mutex_init(&parser->job_lock);
- INIT_WORK(&parser->job_work, amdgpu_job_work_func);
- }
for (i = 0; i < num_ibs; i++)
ibs[i].ctx = ctx;
uint64_t *chunk_array_user;
uint64_t *chunk_array = NULL;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_bo_list *bo_list = NULL;
unsigned size, i;
int r = 0;
r = -EINVAL;
goto out;
}
- bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- if (bo_list && !bo_list->has_userptr) {
- p->bo_list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
- if (!p->bo_list)
- return -ENOMEM;
- amdgpu_bo_list_copy(p->adev, p->bo_list, bo_list);
- amdgpu_bo_list_put(bo_list);
- } else if (bo_list && bo_list->has_userptr)
- p->bo_list = bo_list;
- else
- p->bo_list = NULL;
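+ /* just take a reference on the BO list here; it is dropped again with
+  * amdgpu_bo_list_put() when the parser is torn down */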
+ p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
/* get chunks */
INIT_LIST_HEAD(&p->validated);
}
- p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
+ p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!p->ibs)
r = -ENOMEM;
return max(bytes_moved_threshold, 1024*1024ull);
}
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct list_head *validated)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_device *adev = p->adev;
struct amdgpu_bo_list_entry *lobj;
- struct list_head duplicates;
struct amdgpu_bo *bo;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
- }
-
- list_for_each_entry(lobj, &p->validated, tv.head) {
+ list_for_each_entry(lobj, validated, tv.head) {
bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
* into account. We don't want to disallow buffer moves
* completely.
*/
- if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+ if ((lobj->allowed_domains & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
return r;
}
}
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_cs_buckets buckets;
+ struct list_head duplicates;
bool need_mmap_lock = false;
int i, r;
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
- r = amdgpu_cs_list_validate(p);
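+ /* reserve all buffers first, then validate the main list and the
+  * duplicates TTM found while reserving */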
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+ if (unlikely(r != 0))
+ goto error_reserve;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+ if (r)
+ goto error_validate;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+error_validate:
+ if (r)
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+error_reserve:
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
- amdgpu_cs_parser_fini_early(parser, error, backoff);
- amdgpu_cs_parser_fini_late(parser);
-}
-
-static int amdgpu_cs_parser_run_job(
- struct amdgpu_cs_parser *sched_job)
-{
- amdgpu_cs_parser_fini_early(sched_job, 0, true);
- return 0;
-}
-
-static int amdgpu_cs_parser_free_job(
- struct amdgpu_cs_parser *sched_job)
-{
- amdgpu_cs_parser_fini_late(sched_job);
- return 0;
-}
-
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
if (!error) {
unsigned i;
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
- if (parser->bo_list) {
- if (!parser->bo_list->has_userptr)
- amdgpu_bo_list_free(parser->bo_list);
- else
- amdgpu_bo_list_put(parser->bo_list);
- }
+ if (parser->bo_list)
+ amdgpu_bo_list_put(parser->bo_list);
+
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- if (parser->ibs)
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
- kfree(parser->ibs);
- if (parser->uf.bo)
- drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-
- if (!amdgpu_enable_scheduler)
- kfree(parser);
+ if (!amdgpu_enable_scheduler) {
+ if (parser->ibs)
+ for (i = 0; i < parser->num_ibs; i++)
+ amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+ kfree(parser->ibs);
+ if (parser->uf.bo)
+ drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+ }
+
+ kfree(parser);
+}
+
+/**
+ * amdgpu_cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ * @backoff: indicator to back off the buffer reservation
+ *
+ * If error is set, then unvalidate the buffers; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+ amdgpu_cs_parser_fini_early(parser, error, backoff);
+ amdgpu_cs_parser_fini_late(parser);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
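+ /* make the submission wait for the last page directory update */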
+ r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+ if (r)
+ return r;
+
r = amdgpu_vm_clear_freed(adev, vm);
if (r)
return r;
if (r)
return r;
- f = &bo_va->last_pt_update->base;
+ f = bo_va->last_pt_update;
r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
if (r)
return r;
ib->oa_size = amdgpu_bo_size(oa);
}
}
-
/* wrap the last IB with user fence */
if (parser->uf.bo) {
struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
return 0;
}
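+/* scheduler free_job callback: release the job's IBs and drop the
+ * user fence BO reference */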
-static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
- int r, i;
- struct amdgpu_cs_parser *parser = sched_job;
- struct amdgpu_device *adev = sched_job->adev;
- bool reserved_buffers = false;
-
- r = amdgpu_cs_parser_relocs(parser);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else
- DRM_ERROR("Failed to process the buffer list %d!\n", r);
- }
- }
-
- if (!r) {
- reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, parser);
- }
- if (!r) {
- r = amdgpu_cs_dependencies(adev, parser);
- if (r)
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
- }
- if (r) {
- amdgpu_cs_parser_fini(parser, r, reserved_buffers);
- return r;
- }
-
- for (i = 0; i < parser->num_ibs; i++)
- trace_amdgpu_cs(parser, i);
-
- r = amdgpu_cs_ib_vm_chunk(adev, parser);
- return r;
-}
-
-static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
- struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
-{
- int i, r;
-
- struct amdgpu_cs_chunk *chunk;
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct amdgpu_ring *ring;
- for (i = 0; i < parser->nchunks; i++) {
- chunk = &parser->chunks[i];
- chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &ring);
- if (r)
- return NULL;
- break;
- }
- return ring;
+ int i;
+
+ if (job->ibs)
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
+ if (job->uf.bo)
+ drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+ return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser *parser;
- int r;
+ bool reserved_buffers = false;
+ int i, r;
down_read(&adev->exclusive_lock);
if (!adev->accel_working) {
return r;
}
+ r = amdgpu_cs_parser_relocs(parser);
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r && r != -ERESTARTSYS)
+ DRM_ERROR("Failed to process the buffer list %d!\n", r);
+ else if (!r) {
+ reserved_buffers = true;
+ r = amdgpu_cs_ib_fill(adev, parser);
+ }
+
+ if (!r) {
+ r = amdgpu_cs_dependencies(adev, parser);
+ if (r)
+ DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+ }
+
+ if (r)
+ goto out;
+
+ for (i = 0; i < parser->num_ibs; i++)
+ trace_amdgpu_cs(parser, i);
+
+ r = amdgpu_cs_ib_vm_chunk(adev, parser);
+ if (r)
+ goto out;
+
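+ /* with the GPU scheduler enabled, hand the IBs off as an amdgpu_job to
+  * the context's scheduler entity; the scheduler fence is added to the
+  * context and its sequence number is returned as the CS handle */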
if (amdgpu_enable_scheduler && parser->num_ibs) {
- struct amdgpu_ring * ring =
- amdgpu_cs_parser_get_ring(adev, parser);
- parser->uf.sequence = atomic64_inc_return(
- &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
- r = amdgpu_cs_parser_prepare_job(parser);
- if (r)
- goto out;
- } else
- parser->prepare_job = amdgpu_cs_parser_prepare_job;
- parser->ring = ring;
- parser->run_job = amdgpu_cs_parser_run_job;
- parser->free_job = amdgpu_cs_parser_free_job;
- amd_sched_push_job(ring->scheduler,
- &parser->ctx->rings[ring->idx].c_entity,
- parser);
- cs->out.handle = parser->uf.sequence;
+ struct amdgpu_job *job;
+ struct amdgpu_ring *ring = parser->ibs->ring;
+
+ job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+ if (!job) {
+ 	r = -ENOMEM;
+ 	goto out;
+ }
+ job->base.sched = &ring->sched;
+ job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+ job->adev = parser->adev;
+ job->ibs = parser->ibs;
+ job->num_ibs = parser->num_ibs;
+ job->base.owner = parser->filp;
+ mutex_init(&job->job_lock);
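+ /* the parser is freed right after the push, so the job keeps its own
+  * copy of the user fence and the last IB is pointed at that copy */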
+ if (job->ibs[job->num_ibs - 1].user) {
+ memcpy(&job->uf, &parser->uf,
+ sizeof(struct amdgpu_user_fence));
+ job->ibs[job->num_ibs - 1].user = &job->uf;
+ }
+
+ job->free_job = amdgpu_cs_free_job;
+ mutex_lock(&job->job_lock);
+ r = amd_sched_entity_push_job(&job->base);
+ if (r) {
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_free_job(job);
+ kfree(job);
+ goto out;
+ }
+ cs->out.handle =
+ amdgpu_ctx_add_fence(parser->ctx, ring,
+ &job->base.s_fence->base);
+ parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
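+ /* sort the validated BOs by size and fence them with the scheduler
+  * fence, which also drops the reservation */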
+ list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+ ttm_eu_fence_buffer_objects(&parser->ticket,
+ &parser->validated,
+ &job->base.s_fence->base);
+
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_parser_fini_late(parser);
up_read(&adev->exclusive_lock);
return 0;
}
- r = amdgpu_cs_parser_prepare_job(parser);
- if (r)
- goto out;
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
- amdgpu_cs_parser_fini(parser, r, true);
+ amdgpu_cs_parser_fini(parser, r, reserved_buffers);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
if (!reloc->bo_va)
continue;
- list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+ list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+ if (mapping->it.start > addr ||
+ addr > mapping->it.last)
+ continue;
+
+ *bo = reloc->bo_va->bo;
+ return mapping;
+ }
+
+ list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;