static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
ufshcd_wb_toggle_buf_flush(hba, true);
}
-static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
-{
- if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
- scsi_unblock_requests(hba->host);
-}
-
-static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
-{
- if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
- scsi_block_requests(hba->host);
-}
-
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
* Return: -ETIMEDOUT on error, zero on success.
*/
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms)
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms)
{
- int err = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-
- /* ignore bits that we don't intend to wait on */
- val = val & mask;
+ u32 v;
- while ((ufshcd_readl(hba, reg) & mask) != val) {
- usleep_range(interval_us, interval_us + 50);
- if (time_after(jiffies, timeout)) {
- if ((ufshcd_readl(hba, reg) & mask) != val)
- err = -ETIMEDOUT;
- break;
- }
- }
+ val &= mask; /* ignore bits that we don't intend to wait on */
- return err;
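+	/*
+	 * read_poll_timeout() reads the register every interval_us
+	 * microseconds and returns 0 once (v & mask) == val, or -ETIMEDOUT
+	 * after timeout_ms milliseconds, matching the return convention of
+	 * the open-coded polling loop.
+	 */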
+ return read_poll_timeout(ufshcd_readl, v, (v & mask) == val,
+				 interval_us, timeout_ms * 1000, false, hba, reg);
}
/**
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
const struct scsi_device *sdev;
+ unsigned long flags;
u32 pending = 0;
- lockdep_assert_held(hba->host->host_lock);
+ spin_lock_irqsave(hba->host->host_lock, flags);
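+	/* __shost_for_each_device() requires the caller to hold the host lock. */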
__shost_for_each_device(sdev, hba->host)
pending += sbitmap_weight(&sdev->budget_map);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return pending;
}
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
- unsigned long flags;
int ret = 0;
u32 tm_doorbell;
u32 tr_pending;
ktime_t start;
ufshcd_hold(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
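+	/*
+	 * No host lock is needed while polling: the doorbell registers are
+	 * sampled with single MMIO reads, which are atomic.
+	 */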
/*
* Wait for all the outstanding tasks/transfer requests.
* Verify by checking the doorbell registers are clear.
break;
}
- spin_unlock_irqrestore(hba->host->host_lock, flags);
io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
*/
do_last_check = true;
}
- spin_lock_irqsave(hba->host->host_lock, flags);
} while (tm_doorbell || tr_pending);
if (timeout) {
ret = -EBUSY;
}
out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
return ret;
}
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
- hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
- * @completion: initialize the completion only if this is set to true
*
* Return: 0 only if success.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
- bool completion)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
return -EIO;
}
- if (completion)
- init_completion(&uic_cmd->done);
+ init_completion(&uic_cmd->done);
uic_cmd->cmd_active = 1;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}
+static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag)
+{
+ memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));
+
+ lrbp->cmd = cmd;
+ lrbp->task_tag = tag;
+ lrbp->lun = lun;
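+	/* Device management commands have no SCSI command and hence no request. */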
+ ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp);
+}
+
+static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ struct scsi_cmnd *cmd, u8 lun, int tag)
+{
+ __ufshcd_setup_cmd(lrbp, cmd, lun, tag);
+ lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
+ lrbp->req_abort_skip = false;
+
+ ufshcd_comp_scsi_upiu(hba, lrbp);
+}
+
/**
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @upiu_wlun_id: UPIU W-LUN id
ufshcd_hold(hba);
lrbp = &hba->lrb[tag];
- lrbp->cmd = cmd;
- lrbp->task_tag = tag;
- lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
- ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
-
- lrbp->req_abort_skip = false;
-
- ufshcd_comp_scsi_upiu(hba, lrbp);
+ ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
err = ufshcd_map_sg(hba, lrbp);
if (err) {
static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
enum dev_cmd_type cmd_type, u8 lun, int tag)
{
- lrbp->cmd = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = lun;
+ __ufshcd_setup_cmd(lrbp, NULL, lun, tag);
lrbp->intr_cmd = true; /* No interrupt aggregation */
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
}
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
u32 mask;
- unsigned long flags;
int err;
if (hba->mcq_enabled) {
mask = 1U << task_tag;
/* clear outstanding transaction before retry */
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear corresponding bit in door-bell.
reenable_intr = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
return -EINVAL;
}
+ if (pwr_info->lane_rx != pwr_info->lane_tx) {
+		dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n",
+			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
+ return -EINVAL;
+ }
+
/*
* First, get the maximum gears of HS speed.
* If a zero value, it means there is no HSGEAR capability.
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
- int retry_outer = 3;
- int retry_inner;
+ int retry;
-start:
- if (ufshcd_is_hba_active(hba))
- /* change controller state to "reset state" */
- ufshcd_hba_stop(hba);
+ for (retry = 3; retry > 0; retry--) {
+ if (ufshcd_is_hba_active(hba))
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
- /* UniPro link is disabled at this point */
- ufshcd_set_link_off(hba);
+ /* UniPro link is disabled at this point */
+ ufshcd_set_link_off(hba);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
+ ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
- /* start controller initialization sequence */
- ufshcd_hba_start(hba);
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
- /*
- * To initialize a UFS host controller HCE bit must be set to 1.
- * During initialization the HCE bit value changes from 1->0->1.
- * When the host controller completes initialization sequence
- * it sets the value of HCE bit to 1. The same HCE bit is read back
- * to check if the controller has completed initialization sequence.
- * So without this delay the value HCE = 1, set in the previous
- * instruction might be read back.
- * This delay can be changed based on the controller.
- */
- ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
+		/*
+		 * To initialize a UFS host controller, the HCE bit must be set to 1.
+		 * During initialization the HCE bit value changes from 1->0->1.
+		 * When the host controller completes the initialization sequence
+		 * it sets the HCE bit back to 1. The same HCE bit is read back
+		 * to check whether the controller has completed initialization.
+		 * Without this delay, the HCE = 1 value set by the previous
+		 * instruction might be read back prematurely.
+		 * This delay can be changed based on the controller.
+		 */
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
- /* wait for the host controller to complete initialization */
- retry_inner = 50;
- while (!ufshcd_is_hba_active(hba)) {
- if (retry_inner) {
- retry_inner--;
- } else {
- dev_err(hba->dev,
- "Controller enable failed\n");
- if (retry_outer) {
- retry_outer--;
- goto start;
- }
- return -EIO;
- }
- usleep_range(1000, 1100);
+ /* wait for the host controller to complete initialization */
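+		/* Poll every 1000 us, for at most 50 ms, until HCE reads back as 1. */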
+		if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+					      CONTROLLER_ENABLE, CONTROLLER_ENABLE,
+					      1000, 50))
+			break;
+
+ dev_err(hba->dev, "Enabling the controller failed\n");
}
+ if (!retry)
+ return -EIO;
+
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
*/
sdev->silence_suspend = 1;
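+
+	/* Allow the host controller driver to adjust SCSI device settings. */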
+ if (hba->vops && hba->vops->config_scsi_dev)
+ hba->vops->config_scsi_dev(sdev);
+
ufshcd_crypto_register(hba, q);
return 0;
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
+ struct uic_command *cmd;
spin_lock(hba->host->host_lock);
+ cmd = hba->active_uic_cmd;
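+	/* A completion interrupt without an active UIC command should not happen. */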
+ if (WARN_ON_ONCE(!cmd))
+ goto unlock;
+
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
- if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
- hba->active_uic_cmd->argument2 |=
- ufshcd_get_uic_cmd_result(hba);
- hba->active_uic_cmd->argument3 =
- ufshcd_get_dme_attr_val(hba);
+ if (intr_status & UIC_COMMAND_COMPL) {
+ cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
+ cmd->argument3 = ufshcd_get_dme_attr_val(hba);
if (!hba->uic_async_done)
- hba->active_uic_cmd->cmd_active = 0;
- complete(&hba->active_uic_cmd->done);
+ cmd->cmd_active = 0;
+ complete(&cmd->done);
retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
- hba->active_uic_cmd->cmd_active = 0;
+ if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
+ cmd->cmd_active = 0;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
if (retval == IRQ_HANDLED)
- ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- UFS_CMD_COMP);
+ ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
+
+unlock:
spin_unlock(hba->host->host_lock);
+
return retval;
}
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
- ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
__func__, err);
- goto out;
+ return;
}
trace_ufshcd_exception_event(dev_name(hba->dev), status);
ufshcd_temp_exception_event_handler(hba, status);
ufs_debugfs_exception_event(hba, status);
-out:
- ufshcd_scsi_unblock_requests(hba);
}
/* Complete requests that have door-bell cleared */
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
- ufshcd_scsi_block_requests(hba);
/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
- blk_mq_wait_quiesce_done(&hba->host->tag_set);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
{
int err = 0;
u32 mask = 1 << tag;
- unsigned long flags;
if (!test_bit(tag, &hba->outstanding_tasks))
goto out;
- spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utmrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
- /* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
spin_unlock_irqrestore(host->host_lock, flags);
+ /* send command to the controller */
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
+
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- int err = 0;
+ int err;
int poll_cnt;
u8 resp = 0xF;
- u32 reg;
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
* cmd not pending in the device, check if it is
* in transition.
*/
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+			dev_info(hba->dev,
+				 "%s: cmd with tag %d not pending in the device.\n",
__func__, tag);
- if (hba->mcq_enabled) {
- /* MCQ mode */
- if (ufshcd_cmd_inflight(lrbp->cmd)) {
- /* sleep for max. 200us same delay as in SDB mode */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
- __func__, tag);
- goto out;
- }
-
- /* Single Doorbell Mode */
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
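+			/*
+			 * ufshcd_cmd_inflight() checks the block layer request
+			 * state, so this test works in both MCQ and legacy
+			 * single doorbell mode.
+			 */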
+			if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ dev_info(hba->dev,
+ "%s: cmd with tag=%d completed.\n",
+ __func__, tag);
+ return 0;
}
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto out;
+			usleep_range(100, 200);
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
__func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
+ return err ? : resp;
}
}
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
+ if (!poll_cnt)
+ return -EBUSY;
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_ABORT_TASK, &resp);
dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
__func__, tag, err);
}
- goto out;
+ return err;
}
err = ufshcd_clear_cmd(hba, tag);
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
-out:
return err;
}
return err;
}
+/**
+ * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result.
+ * @hba: UFS host controller instance.
+ * @probe_start: time when the ufshcd_probe_hba() call started.
+ * @ret: ufshcd_probe_hba() return value.
+ */
+static void ufshcd_process_probe_result(struct ufs_hba *hba,
+ ktime_t probe_start, int ret)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), probe_start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+}
+
/**
* ufshcd_host_reset_and_restore - reset and restore host controller
* @hba: per-adapter instance
err = ufshcd_hba_enable(hba);
/* Establish the link again and restore the device */
- if (!err)
- err = ufshcd_probe_hba(hba, false);
+ if (!err) {
+ ktime_t probe_start = ktime_get();
+
+ err = ufshcd_device_init(hba, /*init_dev_params=*/false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
+ ufshcd_process_probe_result(hba, probe_start, err);
+ }
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
ufshcd_init_clk_scaling_sysfs(hba);
}
+ /*
+ * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
+ * pointer and hence must only be started after the WLUN pointer has
+ * been initialized by ufshcd_scsi_add_wlus().
+ */
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
hba->nutrs);
}
- schedule_delayed_work(&hba->ufs_rtc_update_work,
- msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+static int ufshcd_post_device_init(struct ufs_hba *hba)
+{
+ int ret;
+
+ ufshcd_tune_unipro_params(hba);
+
+ /* UFS device is also active now */
+ ufshcd_set_ufs_dev_active(hba);
+ ufshcd_force_reset_auto_bkops(hba);
+
+ ufshcd_set_timestamp_attr(hba);
+
+ if (!hba->max_pwr_info.is_valid)
+ return 0;
+
+ /*
+ * Set the right value to bRefClkFreq before attempting to
+ * switch to HS gears.
+ */
+ if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
+ ufshcd_set_dev_ref_clk(hba);
+ /* Gear up to HS gear. */
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
- struct Scsi_Host *host = hba->host;
+
+ WARN_ON_ONCE(!hba->scsi_host_added);
hba->ufshcd_state = UFSHCD_STATE_RESET;
ret = ufshcd_device_params_init(hba);
if (ret)
return ret;
- if (is_mcq_supported(hba) && !hba->scsi_host_added) {
- ufshcd_mcq_enable(hba);
- ret = ufshcd_alloc_mcq(hba);
- if (!ret) {
- ufshcd_config_mcq(hba);
- } else {
- /* Continue with SDB mode */
- ufshcd_mcq_disable(hba);
- use_mcq_mode = false;
- dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
- ret);
- }
- ret = scsi_add_host(host, hba->dev);
- if (ret) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- return ret;
- }
- hba->scsi_host_added = true;
- } else if (is_mcq_supported(hba)) {
- /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
+ if (is_mcq_supported(hba) &&
+ hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
ufshcd_config_mcq(hba);
ufshcd_mcq_enable(hba);
}
}
- ufshcd_tune_unipro_params(hba);
-
- /* UFS device is also active now */
- ufshcd_set_ufs_dev_active(hba);
- ufshcd_force_reset_auto_bkops(hba);
-
- ufshcd_set_timestamp_attr(hba);
-
- /* Gear up to HS gear if supported */
- if (hba->max_pwr_info.is_valid) {
- /*
- * Set the right value to bRefClkFreq before attempting to
- * switch to HS gears.
- */
- if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
- ufshcd_set_dev_ref_clk(hba);
- ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret) {
- dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- return 0;
+ return ufshcd_post_device_init(hba);
}
/**
*/
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
- ktime_t start = ktime_get();
- unsigned long flags;
int ret;
- ret = ufshcd_device_init(hba, init_dev_params);
- if (ret)
- goto out;
-
if (!hba->pm_op_in_progress &&
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
- goto out;
+ return ret;
}
/* Reinit the device */
ret = ufshcd_device_init(hba, init_dev_params);
if (ret)
- goto out;
+ return ret;
}
ufshcd_print_pwr_info(hba);
ufshcd_write_ee_control(hba);
ufshcd_configure_auto_hibern8(hba);
-out:
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- trace_ufshcd_init(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
+ return 0;
}
/**
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ ktime_t probe_start;
int ret;
down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
+ probe_start = ktime_get();
ret = ufshcd_probe_hba(hba, true);
+ ufshcd_process_probe_result(hba, probe_start, ret);
up(&hba->host_sem);
if (ret)
goto out;
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
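+	/* Give the host controller driver a chance to override the DMA mask. */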
+ if (hba->vops && hba->vops->set_dma_mask)
+ return hba->vops->set_dma_mask(hba);
if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
return 0;
.queue_rq = ufshcd_queue_tmf,
};
+static int ufshcd_add_scsi_host(struct ufs_hba *hba)
+{
+ int err;
+
+ if (is_mcq_supported(hba)) {
+ ufshcd_mcq_enable(hba);
+ err = ufshcd_alloc_mcq(hba);
+ if (!err) {
+ ufshcd_config_mcq(hba);
+ } else {
+ /* Continue with SDB mode */
+ ufshcd_mcq_disable(hba);
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ err);
+ }
+ }
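+	/* Without MCQ, the legacy single doorbell mode must be supported. */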
+ if (!is_mcq_supported(hba) && !hba->lsdb_sup) {
+ dev_err(hba->dev,
+ "%s: failed to initialize (legacy doorbell mode not supported)\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = scsi_add_host(hba->host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return err;
+ }
+ hba->scsi_host_added = true;
+
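+	/* One TMF tag per UTP task management request slot (hba->nutmrs). */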
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ .flags = BLK_MQ_F_NO_SCHED,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto remove_scsi_host;
+ hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+ hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+ sizeof(*hba->tmf_rqs), GFP_KERNEL);
+ if (!hba->tmf_rqs) {
+ err = -ENOMEM;
+ goto free_tmf_queue;
+ }
+
+ return 0;
+
+free_tmf_queue:
+ blk_mq_destroy_queue(hba->tmf_queue);
+ blk_put_queue(hba->tmf_queue);
+
+free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+
+remove_scsi_host:
+ if (hba->scsi_host_added)
+ scsi_remove_host(hba->host);
+
+ return err;
+}
+
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
hba->is_irq_enabled = true;
}
- if (!is_mcq_supported(hba)) {
- if (!hba->lsdb_sup) {
- dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n",
- __func__);
- err = -EINVAL;
- goto out_disable;
- }
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
- }
- hba->scsi_host_added = true;
- }
-
- hba->tmf_tag_set = (struct blk_mq_tag_set) {
- .nr_hw_queues = 1,
- .queue_depth = hba->nutmrs,
- .ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
- };
- err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
- if (err < 0)
- goto out_remove_scsi_host;
- hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
- if (IS_ERR(hba->tmf_queue)) {
- err = PTR_ERR(hba->tmf_queue);
- goto free_tmf_tag_set;
- }
- hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
- sizeof(*hba->tmf_rqs), GFP_KERNEL);
- if (!hba->tmf_rqs) {
- err = -ENOMEM;
- goto free_tmf_queue;
- }
-
/* Reset the attached device */
ufshcd_device_reset(hba);
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
- goto free_tmf_queue;
+ goto out_disable;
}
/*
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
- atomic_set(&hba->scsi_block_reqs_cnt, 0);
+
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
*/
ufshcd_set_ufs_dev_active(hba);
+ /* Initialize hba, detect and initialize UFS device */
+ ktime_t probe_start = ktime_get();
+
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+ err = ufshcd_link_startup(hba);
+ if (err)
+ goto out_disable;
+
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
+ goto initialized;
+
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+
+ /* UniPro link is active now */
+ ufshcd_set_link_active(hba);
+
+ /* Verify device initialization by sending NOP OUT UPIU */
+ err = ufshcd_verify_dev_init(hba);
+ if (err)
+ goto out_disable;
+
+	/* Initiate UFS initialization and wait until completion */
+ err = ufshcd_complete_dev_init(hba);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_device_params_init(hba);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_post_device_init(hba);
+
+initialized:
+ ufshcd_process_probe_result(hba, probe_start, err);
+ if (err)
+ goto out_disable;
+
+ err = ufshcd_add_scsi_host(hba);
+ if (err)
+ goto out_disable;
+
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
ufshcd_pm_qos_init(hba);
return 0;
-free_tmf_queue:
- blk_mq_destroy_queue(hba->tmf_queue);
- blk_put_queue(hba->tmf_queue);
-free_tmf_tag_set:
- blk_mq_free_tag_set(&hba->tmf_tag_set);
-out_remove_scsi_host:
- if (hba->scsi_host_added)
- scsi_remove_host(hba->host);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);