struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+ dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
return PTR_ERR(p);
}
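
The only change in this hunk is the log level: page allocation failures here can be triggered directly by userspace, so they are reported with dev_dbg() and propagated as an errno instead of being shouted into dmesg. A minimal sketch of that error path, using a hypothetical helper name:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_gem.h>

/* Hypothetical helper: propagate the ERR_PTR value, log only at debug level. */
static int example_get_pages(struct device *dev, struct drm_gem_object *obj,
                             struct page ***pages)
{
        struct page **p = drm_gem_get_pages(obj);

        if (IS_ERR(p)) {
                /* user-triggerable, so keep it out of the default log */
                dev_dbg(dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        *pages = p;
        return 0;
}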
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
mutex_lock(&etnaviv_obj->lock);
WARN_ON(mapping->use == 0);
mapping->use -= 1;
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
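
These fragments appear to come from the mapping reference/unreference helpers; the only functional content is the rename of the deprecated drm_gem_object_reference()/drm_gem_object_unreference_unlocked() calls to drm_gem_object_get()/drm_gem_object_put_unlocked(). A minimal sketch of the use-count pattern they implement, with hypothetical structure and function names:

#include <linux/bug.h>
#include <linux/mutex.h>
#include <drm/drm_gem.h>

/* Hypothetical container: a per-mapping use count that pins a GEM object. */
struct example_mapping {
        struct drm_gem_object *obj;
        struct mutex lock;
        unsigned int use;
};

static void example_mapping_reference(struct example_mapping *m)
{
        drm_gem_object_get(m->obj);             /* new name for _reference() */

        mutex_lock(&m->lock);
        m->use += 1;
        mutex_unlock(&m->lock);
}

static void example_mapping_unreference(struct example_mapping *m)
{
        mutex_lock(&m->lock);
        WARN_ON(m->use == 0);
        m->use -= 1;
        mutex_unlock(&m->lock);

        /* new name for _unreference_unlocked() */
        drm_gem_object_put_unlocked(m->obj);
}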
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
return ERR_PTR(ret);
/* Take a reference on the object */
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return mapping;
}
bool write = !!(op & ETNA_PREP_WRITE);
int ret;
+ if (!etnaviv_obj->sgt) {
+ void *ret;
+
+ mutex_lock(&etnaviv_obj->lock);
+ ret = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
if (op & ETNA_PREP_NOSYNC) {
if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
write))
return -EBUSY;
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- if (!etnaviv_obj->sgt) {
- void *ret;
-
- mutex_lock(&etnaviv_obj->lock);
- ret = etnaviv_gem_get_pages(etnaviv_obj);
- mutex_unlock(&etnaviv_obj->lock);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- }
-
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
etnaviv_op_to_dma_dir(op));
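
The cpu_prep change hoists the page/sg-table population out of the ETNA_BO_CACHED branch, so the object is guaranteed to have backing pages before any fence test or cache maintenance runs. A condensed sketch of the resulting flow, using hypothetical stand-ins for the driver's object type and page helper:

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

struct page;

/* Hypothetical stand-ins for the driver's object type and page helper;
 * the helper returns the page array or an ERR_PTR value. */
struct example_bo {
        struct mutex lock;
        struct sg_table *sgt;
};

struct page **example_get_pages(struct example_bo *bo);

static int example_cpu_prep(struct example_bo *bo)
{
        /* 1. Ensure backing pages and the sg table exist up front. */
        if (!bo->sgt) {
                struct page **pages;

                mutex_lock(&bo->lock);
                pages = example_get_pages(bo);
                mutex_unlock(&bo->lock);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
        }

        /* 2. Only now test or wait on the reservation object, and
         * 3. perform dma_sync_sg_for_cpu() for cached BOs, both of
         *    which can rely on bo->sgt being populated. */
        return 0;
}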
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_drm_private *priv = obj->dev->dev_private;
struct etnaviv_vram_mapping *mapping, *tmp;
/* object should not be active */
WARN_ON(is_active(etnaviv_obj));
+ mutex_lock(&priv->gem_lock);
list_del(&etnaviv_obj->gem_node);
+ mutex_unlock(&priv->gem_lock);
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
obj_node) {
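
The free path now holds the driver-wide gem_lock around list_del(), presumably the same lock that serialises insertion into the private GEM list elsewhere in the driver; without it a concurrent add or free could corrupt the list. A minimal sketch of the pattern with hypothetical names:

#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical driver-private list of all GEM objects. */
struct example_priv {
        struct mutex gem_lock;
        struct list_head gem_list;
};

struct example_obj {
        struct list_head gem_node;
};

static void example_obj_add(struct example_priv *priv, struct example_obj *obj)
{
        mutex_lock(&priv->gem_lock);
        list_add_tail(&obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

/* Removal must take the same lock, otherwise a concurrent add or free
 * can corrupt the list - which is what the hunk above addresses. */
static void example_obj_del(struct example_priv *priv, struct example_obj *obj)
{
        mutex_lock(&priv->gem_lock);
        list_del(&obj->gem_node);
        mutex_unlock(&priv->gem_lock);
}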
* going to pin these pages.
*/
mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
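
The new gfp mask keeps GFP_HIGHUSER but adds __GFP_RETRY_MAYFAIL, which lets large shmem page allocations fail back to the driver instead of invoking the OOM killer, and __GFP_NOWARN, which suppresses the allocation-failure backtrace to match the quieter dev_dbg() reporting above. A sketch of where such a mask would be applied, assuming a hypothetical init helper:

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Hypothetical init step for a shmem-backed GEM object: pages may come
 * from highmem, may fail back to the driver instead of OOM-killing, and
 * fail without a backtrace in the log. */
static void example_set_backing_gfp(struct file *filp)
{
        mapping_set_gfp_mask(filp->f_mapping,
                             GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}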
if (ret)
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
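
drm_gem_handle_create() takes its own reference when it installs the object in the file's handle table, so the allocation reference is dropped unconditionally afterwards; on failure the put is what frees the object. A minimal sketch of the idiom with a hypothetical wrapper:

#include <drm/drm_file.h>
#include <drm/drm_gem.h>

/* Hypothetical wrapper: the handle table holds its own reference, so the
 * allocation reference is always dropped here, success or failure. */
static int example_gem_new_handle(struct drm_file *file,
                                  struct drm_gem_object *obj, u32 *handle)
{
        int ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(obj);

        return ret;
}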
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
}
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
mmput(work->mm);
put_task_struct(work->task);
}
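
The userptr path pins the task, its mm and the GEM object before queuing the deferred get-pages work, and the worker releases all three when it finishes; the hunks here only rename the GEM half of that to drm_gem_object_get()/drm_gem_object_put_unlocked(). A stripped-down sketch of that lifetime handling, with a hypothetical work container (not the driver's actual structure):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <drm/drm_gem.h>

/* Hypothetical deferred-work container. */
struct example_userptr_work {
        struct work_struct work;
        struct drm_gem_object *obj;
        struct mm_struct *mm;
        struct task_struct *task;
};

static void example_userptr_worker(struct work_struct *work)
{
        struct example_userptr_work *w =
                container_of(work, struct example_userptr_work, work);

        /* ... get_user_pages() against w->mm on behalf of w->task ... */

        /* Release everything pinned when the work was queued. */
        drm_gem_object_put_unlocked(w->obj);
        mmput(w->mm);
        put_task_struct(w->task);
        kfree(w);
}

static void example_userptr_queue(struct example_userptr_work *w,
                                  struct drm_gem_object *obj)
{
        get_task_struct(current);       /* the worker may outlive the ioctl */
        mmget(current->mm);
        drm_gem_object_get(obj);

        w->obj = obj;
        w->mm = current->mm;
        w->task = current;

        INIT_WORK(&w->work, example_userptr_worker);
        schedule_work(&w->work);
}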
get_task_struct(current);
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
work->mm = mm;
work->task = current;
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;
}
struct etnaviv_gem_submit *submit;
size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
- submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
submit->gpu = gpu;
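
GFP_TEMPORARY no longer exists, so the submit allocation uses plain GFP_KERNEL; __GFP_NOWARN | __GFP_NORETRY is kept so a userspace-sized allocation gives up quickly and quietly rather than applying heavy reclaim pressure. A sketch of the allocation with a hypothetical structure (overflow checking omitted for brevity):

#include <linux/slab.h>

/* Hypothetical variable-sized submit container. */
struct example_submit {
        unsigned int nr_bos;
        struct { void *obj; } bos[];
};

static struct example_submit *example_submit_alloc(unsigned int nr)
{
        size_t sz = sizeof(struct example_submit) +
                    nr * sizeof(((struct example_submit *)NULL)->bos[0]);

        /* The size is derived from userspace input: give up quickly and
         * without a warning if it cannot be satisfied. */
        return kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
}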
* Take a refcount on the object. The file table lock
* prevents the object_idr's refcount on this being dropped.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = to_etnaviv_bo(obj);
}
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit_unlock_object(submit, i);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
ww_acquire_fini(&submit->ticket);
cmdbuf->user_size = ALIGN(args->stream_size, 8);
ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
- if (ret == 0)
- cmdbuf = NULL;
+ if (ret)
+ goto out;
+
+ cmdbuf = NULL;
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
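
The last hunk reworks the submit error handling: any failure from etnaviv_gpu_submit() now jumps to the common out label, and only on success is the local cmdbuf pointer cleared, marking that ownership has moved to the GPU so the cleanup path must not free it. A sketch of that ownership-transfer idiom with hypothetical names:

#include <linux/stddef.h>

/* Hypothetical GPU/cmdbuf types and helpers, declared only for the sketch. */
struct example_gpu;
struct example_cmdbuf;

int example_gpu_queue(struct example_gpu *gpu, struct example_cmdbuf *buf);
void example_cmdbuf_free(struct example_cmdbuf *buf);

/* On success the callee owns the buffer, so forget the local pointer;
 * on any failure the common cleanup still frees it. */
static int example_submit(struct example_gpu *gpu, struct example_cmdbuf *cmdbuf)
{
        int ret;

        ret = example_gpu_queue(gpu, cmdbuf);
        if (ret)
                goto out;               /* we still own cmdbuf */

        cmdbuf = NULL;                  /* ownership transferred to the GPU */

        /* ... post-submit work, e.g. installing an out-fence fd ... */

out:
        if (cmdbuf)
                example_cmdbuf_free(cmdbuf);
        return ret;
}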