-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  28
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c      2
-rw-r--r--  include/drm/gpu_scheduler.h                21
3 files changed, 26 insertions, 25 deletions
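
The patch below renames struct drm_sched_entity's rq_lock to lock and documents it as protecting @rq, @priority and the scheduler list (@sched_list, @num_sched_list), which the lock already guards in practice. A minimal sketch of the resulting pattern, using only field and function names visible in the diff; the wrapper function itself is hypothetical:

/*
 * Sketch only: shows the renamed entity->lock as used after this patch.
 * example_set_priority() is a made-up wrapper; the real helper is
 * drm_sched_entity_set_priority().
 */
#include <linux/spinlock.h>
#include <drm/gpu_scheduler.h>

static void example_set_priority(struct drm_sched_entity *entity,
				 enum drm_sched_priority prio)
{
	/* entity->lock (formerly entity->rq_lock) covers @rq, @priority
	 * and @sched_list/@num_sched_list. */
	spin_lock(&entity->lock);
	entity->priority = prio;
	spin_unlock(&entity->lock);
}
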
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index b72cba292839..c013c2b49aa5 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
/* We start in an idle state. */
complete_all(&entity->entity_idle);
- spin_lock_init(&entity->rq_lock);
+ spin_lock_init(&entity->lock);
spsc_queue_init(&entity->job_queue);
atomic_set(&entity->fence_seq, 0);
@@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
{
WARN_ON(!num_sched_list || !sched_list);
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->sched_list = sched_list;
entity->num_sched_list = num_sched_list;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
@@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
if (!entity->rq)
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->stopped = true;
drm_sched_rq_remove_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
/* Make sure this entity is not used by the scheduler at the moment */
wait_for_completion(&entity->entity_idle);
@@ -396,9 +396,9 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->priority = priority;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -515,10 +515,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
if (next) {
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
drm_sched_rq_update_fifo_locked(entity,
next->submit_ts);
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
}
@@ -559,14 +559,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
if (fence && !dma_fence_is_signaled(fence))
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
rq = sched ? sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
@@ -605,9 +605,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
struct drm_sched_rq *rq;
/* Add the entity to the run queue */
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
if (entity->stopped) {
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
DRM_ERROR("Trying to push to a killed entity\n");
return;
@@ -621,7 +621,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
drm_sched_rq_update_fifo_locked(entity, submit_ts);
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
drm_sched_wakeup(sched);
}
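
Drivers do not take entity->lock themselves; they only call the exported entry points touched above, which acquire it internally. A hedged driver-side sketch (the function and its error handling are placeholders, not part of the patch):

/*
 * Hypothetical driver snippet: the entity lock is internal to the
 * scheduler; drivers only use the exported helpers shown in the diff.
 */
#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

static int example_entity_setup(struct drm_sched_entity *entity,
				struct drm_gpu_scheduler *sched)
{
	struct drm_gpu_scheduler *sched_list[] = { sched };
	int ret;

	ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				    sched_list, ARRAY_SIZE(sched_list),
				    NULL);
	if (ret)
		return ret;

	/* Takes entity->lock internally, as in the hunk above. */
	drm_sched_entity_set_priority(entity, DRM_SCHED_PRIORITY_HIGH);

	return 0;
}
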
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 28083b0b05a9..fb53a4918c23 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -170,7 +170,7 @@ void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, ktime_t ts
* for entity from within concurrent drm_sched_entity_select_rq and the
* other to update the rb tree structure.
*/
- lockdep_assert_held(&entity->rq_lock);
+ lockdep_assert_held(&entity->lock);
spin_lock(&entity->rq->lock);
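
The assertion above encodes the lock nesting used by the FIFO update path: the caller holds entity->lock, and the per-run-queue lock nests inside it. A hedged sketch of that ordering; the helper name is made up, the real function is drm_sched_rq_update_fifo_locked():

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical illustration of the nesting asserted above. */
static void example_rq_update_locked(struct drm_sched_entity *entity)
{
	lockdep_assert_held(&entity->lock);	/* outer: taken by the caller */

	spin_lock(&entity->rq->lock);		/* inner: protects the rb tree */
	/* ... rb-tree manipulation would go here ... */
	spin_unlock(&entity->rq->lock);
}
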
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index b6d095074c19..e2b612817e3b 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -97,13 +97,21 @@ struct drm_sched_entity {
struct list_head list;
/**
+ * @lock:
+ *
+ * Lock protecting the run-queue (@rq) to which this entity belongs,
+ * @priority and the list of schedulers (@sched_list, @num_sched_list).
+ */
+ spinlock_t lock;
+
+ /**
* @rq:
*
* Runqueue on which this entity is currently scheduled.
*
* FIXME: Locking is very unclear for this. Writers are protected by
- * @rq_lock, but readers are generally lockless and seem to just race
- * with not even a READ_ONCE.
+ * @lock, but readers are generally lockless and seem to just race with
+ * not even a READ_ONCE.
*/
struct drm_sched_rq *rq;
@@ -136,18 +144,11 @@ struct drm_sched_entity {
* @priority:
*
* Priority of the entity. This can be modified by calling
- * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ * drm_sched_entity_set_priority(). Protected by @lock.
*/
enum drm_sched_priority priority;
/**
- * @rq_lock:
- *
- * Lock to modify the runqueue to which this entity belongs.
- */
- spinlock_t rq_lock;
-
- /**
* @job_queue: the list of jobs of this entity.
*/
struct spsc_queue job_queue;
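
For reference, a condensed view of the affected members after this patch, assembled from the hunks above (unrelated members elided):

struct drm_sched_entity {
	/* ... */
	spinlock_t		lock;		/* protects @rq, @priority, @sched_list/@num_sched_list */
	struct drm_sched_rq	*rq;		/* writers hold @lock; readers remain lockless (see FIXME) */
	/* ... */
	enum drm_sched_priority	priority;	/* protected by @lock */
	struct spsc_queue	job_queue;
	/* ... */
};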