Diffstat (limited to 'drivers/ufs/core/ufshcd.c')
-rw-r--r--  drivers/ufs/core/ufshcd.c  327
1 file changed, 141 insertions(+), 186 deletions(-)
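The diff below replaces open-coded spin_lock_irqsave()/spin_unlock_irqrestore() pairs on the SCSI host lock with dedicated clk_gating.lock and clk_scaling.lock spinlocks, acquired through the kernel's scope-based cleanup helpers guard() and scoped_guard(). A minimal sketch of how those helpers behave, using an illustrative struct that is not part of the patch:

#include <linux/spinlock.h>

struct demo {
	spinlock_t lock;
	int active_reqs;
	bool is_suspended;
};

/* guard(): the lock is held from this point until the function returns. */
static void demo_set_suspended(struct demo *d, bool value)
{
	guard(spinlock_irqsave)(&d->lock);
	d->is_suspended = value;
}

/*
 * scoped_guard(): the lock is held only for the braced block; an early
 * return inside the block still releases it automatically.
 */
static void demo_try_suspend(struct demo *d)
{
	scoped_guard(spinlock_irqsave, &d->lock) {
		if (d->active_reqs)
			return;
		d->is_suspended = true;
	}
	/* lock already dropped before any slower follow-up work */
}

This is why the early returns inside the new scoped_guard() blocks in the hunks below no longer need explicit spin_unlock_irqrestore() calls.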
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8a01e4393159..cd404ade48dc 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+{
+ return hba->outstanding_tasks || hba->active_uic_cmd ||
+ hba->uic_async_done;
+}
+
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done);
+ return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -1447,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.active_reqs ||
+ hba->clk_scaling.is_suspended)
+ return;
+
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_suspend_device(hba->devfreq);
}
@@ -1465,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
- unsigned long irq_flags;
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended)
+ return;
+ hba->clk_scaling.is_suspended = false;
}
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
@@ -1487,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
- unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1508,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (ufshcd_eh_in_progress(hba))
+ return 0;
- /* Skip scaling clock when clock scaling is suspended */
- if (hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- dev_warn(hba->dev, "clock scaling is suspended, skip");
- return 0;
- }
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
+ if (list_empty(clk_list))
+ goto out;
- /* Decide based on the target or rounded-off frequency and update */
- if (hba->use_pm_opp)
- scale_up = *freq > hba->clk_scaling.target_freq;
- else
- scale_up = *freq == clki->max_freq;
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
- if (!hba->use_pm_opp && !scale_up)
- *freq = clki->min_freq;
+ if (!hba->use_pm_opp && !scale_up)
+ *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
+ /* Update the frequency */
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+ ret = 0;
+ goto out; /* no state change required */
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1569,7 +1566,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1577,7 +1573,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
memset(stat, 0, sizeof(*stat));
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
@@ -1613,7 +1610,7 @@ start_window:
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return 0;
}
@@ -1677,19 +1674,19 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- hba->clk_scaling.window_start_t = 0;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
devfreq_suspend_device(hba->devfreq);
@@ -1697,15 +1694,15 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
- unsigned long flags;
bool resume = false;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
+ scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+ {
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
@@ -1791,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
+ spin_lock_init(&hba->clk_scaling.lock);
+
hba->clk_scaling.workq = alloc_ordered_workqueue(
"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
@@ -1811,19 +1810,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
- unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ if (hba->clk_gating.state == CLKS_ON)
+ return;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
@@ -1858,7 +1854,7 @@ void ufshcd_hold(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
hba->clk_gating.active_reqs++;
start:
@@ -1874,11 +1870,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
}
break;
@@ -1907,17 +1903,17 @@ start:
*/
fallthrough;
case REQ_CLKS_ON:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ spin_lock_irqsave(&hba->clk_gating.lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1925,28 +1921,32 @@ static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
- unsigned long flags;
int ret;
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * In case you are here to cancel this work the gating state
- * would be marked as REQ_CLKS_ON. In this case save time by
- * skipping the gating work and exit after changing the clock
- * state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+ /*
+ * In case you are here to cancel this work the gating state
+ * would be marked as REQ_CLKS_ON. In this case save time by
+ * skipping the gating work and exit after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ hba->clk_gating.state != REQ_CLKS_OFF) {
+ hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ return;
+ }
- if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
- goto rel_lock;
+ if (hba->clk_gating.active_reqs)
+ return;
+ }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_is_ufs_dev_busy(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1957,7 +1957,7 @@ static void ufshcd_gate_work(struct work_struct *work)
__func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- goto out;
+ return;
}
ufshcd_set_link_hibern8(hba);
}
@@ -1977,33 +1977,34 @@ static void ufshcd_gate_work(struct work_struct *work)
* prevent from doing cancel work multiple times when there are
* new requests arriving before the current cancel work is done.
*/
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
}
-/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
+ lockdep_assert_held(&hba->clk_gating.lock);
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
- hba->active_uic_cmd || hba->uic_async_done ||
+ !hba->clk_gating.is_initialized ||
hba->clk_gating.state == CLKS_OFF)
return;
+ scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+ if (ufshcd_has_pending_tasks(hba) ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ return;
+ }
+
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2013,11 +2014,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
void ufshcd_release(struct ufs_hba *hba)
{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
__ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -2032,11 +2030,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2064,7 +2060,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
@@ -2072,9 +2067,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
if (value == hba->clk_gating.is_enabled)
- goto out;
+ return count;
if (value)
__ufshcd_release(hba);
@@ -2082,8 +2078,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return count;
}
@@ -2125,6 +2120,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+ spin_lock_init(&hba->clk_gating.lock);
+
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
hba->host->host_no);
@@ -2154,19 +2151,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
- }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2182,18 +2177,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
+ guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
@@ -2201,7 +2195,6 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2418,12 +2411,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
else
hba->lsdb_sup = true;
- if (!hba->mcq_sup)
- return 0;
-
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
- hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
- hba->mcq_capabilities);
return 0;
}
@@ -4812,20 +4800,14 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
*/
void ufshcd_hba_stop(struct ufs_hba *hba)
{
- unsigned long flags;
int err;
- /*
- * Obtain the host lock to prevent that the controller is disabled
- * while the UFS interrupt handler is active on another CPU.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_disable_irq(hba);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
10, 1);
+ ufshcd_enable_irq(hba);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -5195,12 +5177,12 @@ set_qdepth:
}
/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * ufshcd_sdev_init - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: success.
*/
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
+static int ufshcd_sdev_init(struct scsi_device *sdev)
{
struct ufs_hba *hba;
@@ -5243,14 +5225,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
}
/**
- * ufshcd_device_configure - adjust SCSI device configurations
+ * ufshcd_sdev_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
* @lim: queue limits
*
* Return: 0 (success).
*/
-static int ufshcd_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int ufshcd_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
@@ -5281,10 +5263,10 @@ static int ufshcd_device_configure(struct scsi_device *sdev,
}
/**
- * ufshcd_slave_destroy - remove SCSI device configurations
+ * ufshcd_sdev_destroy - remove SCSI device configurations
* @sdev: pointer to SCSI device
*/
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
+static void ufshcd_sdev_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
unsigned long flags;
@@ -8133,31 +8115,6 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
-static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u32 ext_ufs_feature;
- u32 ext_iid_en = 0;
- int err;
-
- /* Only UFS-4.0 and above may support EXT_IID */
- if (dev_info->wspecversion < 0x400)
- goto out;
-
- ext_ufs_feature = get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
- goto out;
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
- if (err)
- dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
-
-out:
- dev_info->b_ext_iid_en = ext_iid_en;
-}
-
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8259,7 +8216,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
/* Update RTC only when there are no requests in progress and UFSHCI is operational */
- if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ if (!ufshcd_is_ufs_dev_busy(hba) &&
+ hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
+ !hba->clk_gating.active_reqs)
ufshcd_update_rtc(hba);
if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
@@ -8351,9 +8310,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
- if (hba->ext_iid_sup)
- ufshcd_ext_iid_probe(hba, desc_buf);
-
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8858,7 +8814,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
ufshcd_device_reset(hba);
ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
- ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
if (ret) {
dev_err(hba->dev, "Host controller enable failed\n");
@@ -8968,9 +8923,9 @@ static const struct scsi_host_template ufshcd_driver_template = {
.map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
- .slave_alloc = ufshcd_slave_alloc,
- .device_configure = ufshcd_device_configure,
- .slave_destroy = ufshcd_slave_destroy,
+ .sdev_init = ufshcd_sdev_init,
+ .sdev_configure = ufshcd_sdev_configure,
+ .sdev_destroy = ufshcd_sdev_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
@@ -9156,7 +9111,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
@@ -9207,11 +9161,10 @@ out:
clk_disable_unprepare(clki->clk);
}
} else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
+ scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+ hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
@@ -10412,7 +10365,6 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
@@ -10591,14 +10543,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/*
- * Set the default power management level for runtime and system PM.
+ * Set the default power management level for runtime and system PM if
+ * not set by the host controller drivers.
* Default power saving mode is to keep UFS link in Hibern8 state
* and UFS device in sleep state.
*/
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ if (!hba->rpm_lvl)
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ if (!hba->spm_lvl)
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
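The final hunk makes the default rpm_lvl/spm_lvl assignment in ufshcd_init() conditional, so a host controller glue driver can preset its own power management level and have it preserved. A hedged sketch of what that could look like from a hypothetical variant ->init callback (the function name and chosen level are illustrative, not taken from the patch):

#include <ufs/ufshcd.h>

/*
 * Hypothetical ufs_hba_variant_ops::init callback; it runs inside
 * ufshcd_init() before the default PM levels are applied.
 */
static int example_ufs_init(struct ufs_hba *hba)
{
	/* Preset deeper PM levels; with this change ufshcd_init() keeps
	 * them instead of overwriting them with the defaults.
	 */
	hba->rpm_lvl = UFS_PM_LVL_5;
	hba->spm_lvl = UFS_PM_LVL_5;
	return 0;
}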