Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--   drivers/gpu/drm/i915/display/intel_atomic_plane.c       |  7
-rw-r--r--   drivers/gpu/drm/i915/display/intel_dp.c                 |  2
-rw-r--r--   drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c   | 81
-rw-r--r--   drivers/gpu/drm/i915/gem/i915_gem_busy.c                | 35
-rw-r--r--   drivers/gpu/drm/i915/gem/i915_gem_wait.c                | 96
-rw-r--r--   drivers/gpu/drm/i915/i915_request.c                     | 57
-rw-r--r--   drivers/gpu/drm/i915/i915_request.h                     |  5
-rw-r--r--   drivers/gpu/drm/i915/i915_sw_fence.c                    | 53
8 files changed, 171 insertions, 165 deletions
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 0be8c00e3db9..cdc68fb51ba6 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -738,6 +738,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 	i915_gem_object_wait_priority(obj, 0, &attr);
 
 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
+		struct dma_resv_iter cursor;
 		struct dma_fence *fence;
 
 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
@@ -748,12 +749,12 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 		if (ret < 0)
 			goto unpin_fb;
 
-		fence = dma_resv_get_excl_unlocked(obj->base.resv);
-		if (fence) {
+		dma_resv_iter_begin(&cursor, obj->base.resv, false);
+		dma_resv_for_each_fence_unlocked(&cursor, fence) {
 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 						   fence);
-			dma_fence_put(fence);
 		}
+		dma_resv_iter_end(&cursor);
 	} else {
 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 					   new_plane_state->uapi.fence);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index be883469d2fc..8195452b2d4c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4960,7 +4960,7 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
 						DRM_MODE_LINK_STATUS_BAD);
 	mutex_unlock(&connector->dev->mode_config.mutex);
 	/* Send Hotplug uevent so userspace can reprobe */
-	drm_kms_helper_hotplug_event(connector->dev);
+	drm_kms_helper_connector_hotplug_event(connector);
 }
 
 bool
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 569d17b4d00f..8b9c925c4c16 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -282,6 +282,12 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3
 	struct intel_panel *panel = &connector->panel;
 	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+	if (!panel->backlight.edp.vesa.info.aux_set) {
+		const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
+
+		intel_backlight_set_pwm_level(conn_state, pwm_level);
+	}
+
 	drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -293,6 +299,18 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_panel *panel = &connector->panel;
 	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+	if (!panel->backlight.edp.vesa.info.aux_enable) {
+		u32 pwm_level;
+
+		if (!panel->backlight.edp.vesa.info.aux_set)
+			pwm_level = intel_backlight_level_to_pwm(connector, level);
+		else
+			pwm_level = intel_backlight_invert_pwm_level(connector,
+								     panel->backlight.pwm_level_max);
+
+		panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+	}
+
 	drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -304,6 +322,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
 	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
 	drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+	if (!panel->backlight.edp.vesa.info.aux_enable)
+		panel->backlight.pwm_funcs->disable(old_conn_state,
+						    intel_backlight_invert_pwm_level(connector, 0));
 }
 
 static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,14 +343,36 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
 	if (ret < 0)
 		return ret;
 
-	panel->backlight.max = panel->backlight.edp.vesa.info.max;
-	panel->backlight.min = 0;
-	if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
-		panel->backlight.level = current_level;
-		panel->backlight.enabled = panel->backlight.level != 0;
+	if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
+		ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+		if (ret < 0) {
+			drm_err(&i915->drm,
+				"Failed to setup PWM backlight controls for eDP backlight: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	if (panel->backlight.edp.vesa.info.aux_set) {
+		panel->backlight.max = panel->backlight.edp.vesa.info.max;
+		panel->backlight.min = 0;
+		if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+			panel->backlight.level = current_level;
+			panel->backlight.enabled = panel->backlight.level != 0;
+		} else {
+			panel->backlight.level = panel->backlight.max;
+			panel->backlight.enabled = false;
+		}
 	} else {
-		panel->backlight.level = panel->backlight.max;
-		panel->backlight.enabled = false;
+		panel->backlight.max = panel->backlight.pwm_level_max;
+		panel->backlight.min = panel->backlight.pwm_level_min;
+		if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+			panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe);
+			panel->backlight.enabled = panel->backlight.pwm_enabled;
+		} else {
+			panel->backlight.level = panel->backlight.max;
+			panel->backlight.enabled = false;
+		}
 	}
 
 	return 0;
@@ -340,12 +384,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
-	/* TODO: We currently only support AUX only backlight configurations, not backlights which
-	 * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
-	 * work just fine using normal PWM controls anyway.
-	 */
-	if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
-	    drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+	if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
 		drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
 		return true;
 	}
@@ -417,11 +456,17 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
 	}
 
 	/*
-	 * A lot of eDP panels in the wild will report supporting both the
-	 * Intel proprietary backlight control interface, and the VESA
-	 * backlight control interface. Many of these panels are liars though,
-	 * and will only work with the Intel interface. So, always probe for
-	 * that first.
+	 * Since Intel has their own backlight control interface, the majority of machines out there
+	 * using DPCD backlight controls with Intel GPUs will be using this interface as opposed to
+	 * the VESA interface. However, other GPUs (such as Nvidia's) will always use the VESA
+	 * interface. This means that there's quite a number of panels out there that will advertise
+	 * support for both interfaces, primarily systems with Intel/Nvidia hybrid GPU setups.
+	 *
+	 * There's a catch to this though: on many panels that advertise support for both
+	 * interfaces, the VESA backlight interface will stop working once we've programmed the
+	 * panel with Intel's OUI - which is also required for us to be able to detect Intel's
+	 * backlight interface at all. This means that the only sensible way for us to detect both
+	 * interfaces is to probe for Intel's first, and VESA's second.
 	 */
 	if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
 		drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 7358bebef15c..470fdfd61a0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -115,8 +115,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	struct dma_resv_list *list;
-	unsigned int seq;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 	int err;
 
 	err = -ENOENT;
@@ -142,27 +142,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-retry:
-	seq = raw_read_seqcount(&obj->base.resv->seq);
-
-	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
-
-	/* Translate shared fences to READ set of engines */
-	list = dma_resv_shared_list(obj->base.resv);
-	if (list) {
-		unsigned int shared_count = list->shared_count, i;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence =
-				rcu_dereference(list->shared[i]);
-
+	args->busy = 0;
+	dma_resv_iter_begin(&cursor, obj->base.resv, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (dma_resv_iter_is_restarted(&cursor))
+			args->busy = 0;
+
+		if (dma_resv_iter_is_exclusive(&cursor))
+			/* Translate the exclusive fence to the READ *and* WRITE engine */
+			args->busy |= busy_check_writer(fence);
+		else
+			/* Translate shared fences to READ set of engines */
 			args->busy |= busy_check_reader(fence);
-		}
 	}
-
-	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
-		goto retry;
+	dma_resv_iter_end(&cursor);
 
 	err = 0;
 out:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index f909aaa09d9c..f11325484110 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -25,7 +25,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 		return timeout;
 
 	if (dma_fence_is_i915(fence))
-		return i915_request_wait(to_request(fence), flags, timeout);
+		return i915_request_wait_timeout(to_request(fence), flags, timeout);
 
 	return dma_fence_wait_timeout(fence,
 				      flags & I915_WAIT_INTERRUPTIBLE,
@@ -37,58 +37,29 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 				 unsigned int flags,
 				 long timeout)
 {
-	struct dma_fence *excl;
-	bool prune_fences = false;
-
-	if (flags & I915_WAIT_ALL) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-		int ret;
-
-		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			timeout = i915_gem_object_wait_fence(shared[i],
-							     flags, timeout);
-			if (timeout < 0)
-				break;
-
-			dma_fence_put(shared[i]);
-		}
-
-		for (; i < count; i++)
-			dma_fence_put(shared[i]);
-		kfree(shared);
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	long ret = timeout ?: 1;
+
+	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		ret = i915_gem_object_wait_fence(fence, flags, timeout);
+		if (ret <= 0)
+			break;
 
-		/*
-		 * If both shared fences and an exclusive fence exist,
-		 * then by construction the shared fences must be later
-		 * than the exclusive fence. If we successfully wait for
-		 * all the shared fences, we know that the exclusive fence
-		 * must all be signaled. If all the shared fences are
-		 * signaled, we can prune the array and recover the
-		 * floating references on the fences/requests.
-		 */
-		prune_fences = count && timeout >= 0;
-	} else {
-		excl = dma_resv_get_excl_unlocked(resv);
+		if (timeout)
+			timeout = ret;
 	}
-
-	if (excl && timeout >= 0)
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
-	dma_fence_put(excl);
+	dma_resv_iter_end(&cursor);
 
 	/*
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences)
+	if (timeout > 0)
 		dma_resv_prune(resv);
 
-	return timeout;
+	return ret;
 }
 
 static void fence_set_priority(struct dma_fence *fence,
@@ -151,32 +122,13 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
 			      const struct i915_sched_attr *attr)
 {
-	struct dma_fence *excl;
-
-	if (flags & I915_WAIT_ALL) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-		int ret;
-
-		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
-					  &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			i915_gem_fence_wait_priority(shared[i], attr);
-			dma_fence_put(shared[i]);
-		}
-
-		kfree(shared);
-	} else {
-		excl = dma_resv_get_excl_unlocked(obj->base.resv);
-	}
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 
-	if (excl) {
-		i915_gem_fence_wait_priority(excl, attr);
-		dma_fence_put(excl);
-	}
+	dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+	dma_resv_for_each_fence_unlocked(&cursor, fence)
+		i915_gem_fence_wait_priority(fence, attr);
+	dma_resv_iter_end(&cursor);
 
 	return 0;
 }
@@ -196,7 +148,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
 	timeout = i915_gem_object_wait_reservation(obj->base.resv,
 						   flags, timeout);
-	return timeout < 0 ? timeout : 0;
+
+	if (timeout < 0)
+		return timeout;
+
+	return !timeout ? -ETIME : 0;
 }
 
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 820a1f38b271..42cd17357771 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -96,9 +96,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
 				   bool interruptible,
 				   signed long timeout)
 {
-	return i915_request_wait(to_request(fence),
-				 interruptible | I915_WAIT_PRIORITY,
-				 timeout);
+	return i915_request_wait_timeout(to_request(fence),
+					 interruptible | I915_WAIT_PRIORITY,
+					 timeout);
 }
 
 struct kmem_cache *i915_request_slab_cache(void)
@@ -1857,23 +1857,27 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
 }
 
 /**
- * i915_request_wait - wait until execution of request has finished
+ * i915_request_wait_timeout - wait until execution of request has finished
  * @rq: the request to wait upon
  * @flags: how to wait
  * @timeout: how long to wait in jiffies
  *
- * i915_request_wait() waits for the request to be completed, for a
+ * i915_request_wait_timeout() waits for the request to be completed, for a
  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
  * unbounded wait).
  *
  * Returns the remaining time (in jiffies) if the request completed, which may
- * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * be zero if the request is unfinished after the timeout expires.
+ * If the timeout is 0, it will return 1 if the fence is signaled.
+ *
  * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
  * pending before the request completes.
+ *
+ * NOTE: This function has the same wait semantics as dma-fence.
 */
-long i915_request_wait(struct i915_request *rq,
-		       unsigned int flags,
-		       long timeout)
+long i915_request_wait_timeout(struct i915_request *rq,
+			       unsigned int flags,
+			       long timeout)
 {
 	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1883,7 +1887,7 @@ long i915_request_wait(struct i915_request *rq,
 	GEM_BUG_ON(timeout < 0);
 
 	if (dma_fence_is_signaled(&rq->fence))
-		return timeout;
+		return timeout ?: 1;
 
 	if (!timeout)
 		return -ETIME;
@@ -1992,6 +1996,39 @@ out:
 	return timeout;
 }
 
+/**
+ * i915_request_wait - wait until execution of request has finished
+ * @rq: the request to wait upon
+ * @flags: how to wait
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_request_wait() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
+ *
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
+ *
+ * NOTE: This function behaves differently from dma-fence wait semantics for
+ * timeout = 0. It returns 0 on success, and -ETIME if not signaled.
+ */
+long i915_request_wait(struct i915_request *rq,
+		       unsigned int flags,
+		       long timeout)
+{
+	long ret = i915_request_wait_timeout(rq, flags, timeout);
+
+	if (!ret)
+		return -ETIME;
+
+	if (ret > 0 && !timeout)
+		return 0;
+
+	return ret;
+}
+
 static int print_sched_attr(const struct i915_sched_attr *attr,
 			    char *buf, int x, int len)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index dc359242d1ae..3c6e8acd1457 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -414,6 +414,11 @@ void i915_request_unsubmit(struct i915_request *request);
 
 void i915_request_cancel(struct i915_request *rq, int error);
 
+long i915_request_wait_timeout(struct i915_request *rq,
+			       unsigned int flags,
+			       long timeout)
+	__attribute__((nonnull(1)));
+
 long i915_request_wait(struct i915_request *rq,
 		       unsigned int flags,
 		       long timeout)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index c589a681da77..7ea0dbf81530 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -572,56 +572,25 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    unsigned long timeout,
 				    gfp_t gfp)
 {
-	struct dma_fence *excl;
+	struct dma_resv_iter cursor;
+	struct dma_fence *f;
 	int ret = 0, pending;
 
 	debug_fence_assert(fence);
 	might_sleep_if(gfpflags_allow_blocking(gfp));
 
-	if (write) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-
-		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			if (shared[i]->ops == exclude)
-				continue;
-
-			pending = i915_sw_fence_await_dma_fence(fence,
-								shared[i],
-								timeout,
-								gfp);
-			if (pending < 0) {
-				ret = pending;
-				break;
-			}
-
-			ret |= pending;
-		}
-
-		for (i = 0; i < count; i++)
-			dma_fence_put(shared[i]);
-		kfree(shared);
-	} else {
-		excl = dma_resv_get_excl_unlocked(resv);
-	}
-
-	if (ret >= 0 && excl && excl->ops != exclude) {
-		pending = i915_sw_fence_await_dma_fence(fence,
-							excl,
-							timeout,
+	dma_resv_iter_begin(&cursor, resv, write);
+	dma_resv_for_each_fence_unlocked(&cursor, f) {
+		pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
 							gfp);
-		if (pending < 0)
+		if (pending < 0) {
 			ret = pending;
-		else
-			ret |= pending;
-	}
-
-	dma_fence_put(excl);
+			break;
+		}
+		ret |= pending;
+	}
+	dma_resv_iter_end(&cursor);
 
 	return ret;
 }
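
The changes above share one pattern: open-coded dma_resv fence walks (dma_resv_get_fences(), dma_resv_get_excl_unlocked(), seqcount retry loops) are replaced by the dma_resv_iter cursor. A minimal sketch of that unlocked iteration pattern follows; the dma_resv_iter_* helpers are the real dma-resv API used in the diff, while example_resv_is_busy() is a hypothetical wrapper shown only to illustrate how accumulated state is reset when the cursor reports a restart, as i915_gem_busy_ioctl() does above.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hypothetical example: report whether any fence on a reservation object is
 * still unsignaled, using the begin/for_each/end cursor pattern.
 */
static bool example_resv_is_busy(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	bool busy = false;

	/* true iterates all fences (shared + exclusive), false only the exclusive one */
	dma_resv_iter_begin(&cursor, resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the object changed while iterating; state collected so far is stale */
		if (dma_resv_iter_is_restarted(&cursor))
			busy = false;

		busy |= !dma_fence_is_signaled(fence);
	}
	dma_resv_iter_end(&cursor);

	return busy;
}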