Diffstat (limited to 'drivers/gpu/drm/etnaviv')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c        | 16
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h        |  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c        |  6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c  |  8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 84
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 65
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.h      |  3
8 files changed, 61 insertions, 128 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 0b756ecb1bc2..1d2b4fb4bcf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -574,18 +574,6 @@ static const struct component_master_ops etnaviv_master_ops = {
.unbind = etnaviv_unbind,
};
-static int compare_of(struct device *dev, void *data)
-{
- struct device_node *np = data;
-
- return dev->of_node == np;
-}
-
-static int compare_str(struct device *dev, void *data)
-{
- return !strcmp(dev_name(dev), data);
-}
-
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -603,14 +591,14 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
first_node = core_node;
drm_of_component_match_add(&pdev->dev, &match,
- compare_of, core_node);
+ component_compare_of, core_node);
}
} else {
char **names = dev->platform_data;
unsigned i;
for (i = 0; names[i]; i++)
- component_match_add(dev, &match, compare_str, names[i]);
+ component_match_add(dev, &match, component_compare_dev_name, names[i]);
}
/*
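
The hunk above drops the driver-local compare callbacks in favour of the generic helpers declared in <linux/component.h>. As a rough sketch (not taken from this patch; the example_ names are made up), the generic helpers behave like the removed functions:

static int example_compare_of(struct device *dev, void *data)
{
	/* what component_compare_of() checks: the device's OF node */
	return dev->of_node == (struct device_node *)data;
}

static int example_compare_dev_name(struct device *dev, void *data)
{
	/* what component_compare_dev_name() checks: the device name */
	return !strcmp(dev_name(dev), (const char *)data);
}
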
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 049ae87de9be..f32f4771dada 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -49,7 +49,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
-int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index a68e6a17505e..cc386f8a7116 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -377,12 +377,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled(obj->resv, write))
+ if (!dma_resv_test_signaled(obj->resv,
+ dma_resv_usage_rw(write)))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
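
For context, dma_resv_usage_rw() maps the access direction to the fence classes that must be waited on: a new write has to wait for existing readers and writers (DMA_RESV_USAGE_READ), while a new read only has to wait for existing writers (DMA_RESV_USAGE_WRITE). A minimal sketch of the pattern used above (example_obj_is_idle is a made-up helper, not part of the driver):

static bool example_obj_is_idle(struct drm_gem_object *obj, bool write)
{
	/* true once every fence the intended access conflicts with has signaled */
	return dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(write));
}
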
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 98e60df882b6..63688e6e4580 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -80,9 +80,6 @@ struct etnaviv_gem_submit_bo {
u64 va;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
- struct dma_fence *excl;
- unsigned int nr_shared;
- struct dma_fence **shared;
};
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -95,7 +92,7 @@ struct etnaviv_gem_submit {
struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
- struct dma_fence *out_fence, *in_fence;
+ struct dma_fence *out_fence;
int out_fence_id;
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 6788ea8490d1..3fa2da149639 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -25,14 +25,14 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
}
-int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
void *vaddr;
vaddr = etnaviv_gem_vmap(obj);
if (!vaddr)
return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
+ iosys_map_set_vaddr(map, vaddr);
return 0;
}
@@ -62,7 +62,7 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
- struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
@@ -77,7 +77,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
- struct dma_buf_map map;
+ struct iosys_map map;
int ret;
lockdep_assert_held(&etnaviv_obj->lock);
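
The dma_buf_map to iosys_map conversion in this file is mechanical; the structure and its helpers were renamed without changing behaviour. A small sketch under that assumption (example_map_usage is a made-up function):

#include <linux/iosys-map.h>

static void example_map_usage(void *vaddr)
{
	/* was: struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr); */
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(vaddr);

	/* was: dma_buf_map_set_vaddr(&map, vaddr); */
	iosys_map_set_vaddr(&map, vaddr);

	/* was: dma_buf_map_clear(&map); */
	iosys_map_clear(&map);
}
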
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index a17313282e8b..1ac916b24891 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -179,25 +179,18 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
struct dma_resv *robj = bo->obj->base.resv;
- if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
- ret = dma_resv_reserve_shared(robj, 1);
- if (ret)
- return ret;
- }
+ ret = dma_resv_reserve_fences(robj, 1);
+ if (ret)
+ return ret;
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
continue;
- if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences(robj, NULL,
- &bo->nr_shared,
- &bo->shared);
- if (ret)
- return ret;
- } else {
- bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
- }
-
+ ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
+ &bo->obj->base,
+ bo->flags & ETNA_SUBMIT_BO_WRITE);
+ if (ret)
+ return ret;
}
return ret;
@@ -209,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv,
- submit->out_fence);
- else
- dma_resv_add_shared_fence(obj->resv,
- submit->out_fence);
-
+ dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
submit_unlock_object(submit, i);
}
}
@@ -403,8 +392,6 @@ static void submit_cleanup(struct kref *kref)
wake_up_all(&submit->gpu->fence_event);
- if (submit->in_fence)
- dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
mutex_lock(&submit->gpu->fence_lock);
@@ -535,58 +522,69 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8);
if (ret)
- goto err_submit_objects;
+ goto err_submit_put;
submit->ctx = file->driver_priv;
submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
+ ret = drm_sched_job_init(&submit->sched_job,
+ &ctx->sched_entity[args->pipe],
+ submit->ctx);
+ if (ret)
+ goto err_submit_put;
+
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
- goto err_submit_objects;
+ goto err_submit_job;
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
- submit->in_fence = sync_file_get_fence(args->fence_fd);
- if (!submit->in_fence) {
+ struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
+ if (!in_fence) {
ret = -EINVAL;
- goto err_submit_objects;
+ goto err_submit_job;
}
+
+ ret = drm_sched_job_add_dependency(&submit->sched_job,
+ in_fence);
+ if (ret)
+ goto err_submit_job;
}
ret = submit_pin_objects(submit);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
ret = submit_reloc(submit, stream, args->stream_size / 4,
relocs, args->nr_relocs);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
ret = submit_lock_objects(submit, &ticket);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
ret = submit_fence_sync(submit);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
- ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
+ ret = etnaviv_sched_push_job(submit);
if (ret)
- goto err_submit_objects;
+ goto err_submit_job;
submit_attach_object_fences(submit);
@@ -600,7 +598,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
sync_file = sync_file_create(submit->out_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto err_submit_objects;
+ /*
+ * When this late error is hit, the submit has already
+ * been handed over to the scheduler. At this point
+ * the sched_job must not be cleaned up.
+ */
+ goto err_submit_put;
}
fd_install(out_fence_fd, sync_file->file);
}
@@ -608,7 +611,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence_fd = out_fence_fd;
args->fence = submit->out_fence_id;
-err_submit_objects:
+err_submit_job:
+ if (ret)
+ drm_sched_job_cleanup(&submit->sched_job);
+err_submit_put:
etnaviv_submit_put(submit);
err_submit_ww_acquire:
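
Taken together, the submit-path changes in this file replace driver-managed exclusive/shared fence bookkeeping with the common helpers: always reserve a fence slot, let the scheduler collect implicit dependencies from the reservation object, and publish the out-fence with an explicit usage class. A condensed sketch of that flow (example_sync_and_attach is illustrative only; the driver performs these steps at different points in the ioctl):

static int example_sync_and_attach(struct drm_sched_job *job,
				   struct drm_gem_object *obj,
				   struct dma_fence *out_fence, bool write)
{
	int ret;

	/* always reserve a slot, for readers and writers alike */
	ret = dma_resv_reserve_fences(obj->resv, 1);
	if (ret)
		return ret;

	/* pull existing fences from the resv object as scheduler dependencies */
	ret = drm_sched_job_add_implicit_dependencies(job, obj, write);
	if (ret)
		return ret;

	/* publish the job's fence with an explicit usage class */
	dma_resv_add_fence(obj->resv, out_fence,
			   write ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	return 0;
}
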
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 58f593b278c1..72e2553fbc98 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -17,58 +17,6 @@ module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
-static struct dma_fence *
-etnaviv_sched_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity)
-{
- struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
- struct dma_fence *fence;
- int i;
-
- if (unlikely(submit->in_fence)) {
- fence = submit->in_fence;
- submit->in_fence = NULL;
-
- if (!dma_fence_is_signaled(fence))
- return fence;
-
- dma_fence_put(fence);
- }
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- int j;
-
- if (bo->excl) {
- fence = bo->excl;
- bo->excl = NULL;
-
- if (!dma_fence_is_signaled(fence))
- return fence;
-
- dma_fence_put(fence);
- }
-
- for (j = 0; j < bo->nr_shared; j++) {
- if (!bo->shared[j])
- continue;
-
- fence = bo->shared[j];
- bo->shared[j] = NULL;
-
- if (!dma_fence_is_signaled(fence))
- return fence;
-
- dma_fence_put(fence);
- }
- kfree(bo->shared);
- bo->nr_shared = 0;
- bo->shared = NULL;
- }
-
- return NULL;
-}
-
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
@@ -142,29 +90,22 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
- .dependency = etnaviv_sched_dependency,
.run_job = etnaviv_sched_run_job,
.timedout_job = etnaviv_sched_timedout_job,
.free_job = etnaviv_sched_free_job,
};
-int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
- struct etnaviv_gem_submit *submit)
+int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
int ret = 0;
/*
* Hold the fence lock across the whole operation to avoid jobs being
* pushed out of order with regard to their sched fence seqnos as
- * allocated in drm_sched_job_init.
+ * allocated in drm_sched_job_arm.
*/
mutex_lock(&submit->gpu->fence_lock);
- ret = drm_sched_job_init(&submit->sched_job, sched_entity,
- submit->ctx);
- if (ret)
- goto out_unlock;
-
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
@@ -195,7 +136,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
msecs_to_jiffies(500), NULL, NULL,
- dev_name(gpu->dev));
+ dev_name(gpu->dev), gpu->dev);
if (ret)
return ret;
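
The scheduler-side counterpart: with the .dependency callback gone, drm_sched_job_init() is called early in the ioctl so dependencies can still be added and failures unwound, while drm_sched_job_arm() assigns the fence seqno just before the job is pushed; drm_sched_init() additionally gains a struct device pointer as its last argument. A minimal sketch of the init/arm/push split (example_push is illustrative, not driver code):

static int example_push(struct drm_sched_job *job,
			struct drm_sched_entity *entity, void *owner)
{
	int ret;

	/* early: job becomes a valid dependency container, may still fail */
	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	/* ... add dependencies here; on error, drm_sched_job_cleanup(job) ... */

	/* late: allocate the fence seqno and hand the job to the scheduler */
	drm_sched_job_arm(job);
	drm_sched_entity_push_job(job);
	return 0;
}
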
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
index c0a6796e22c9..baebfa069afc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -18,7 +18,6 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
int etnaviv_sched_init(struct etnaviv_gpu *gpu);
void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
-int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
- struct etnaviv_gem_submit *submit);
+int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);
#endif /* __ETNAVIV_SCHED_H__ */