/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <[email protected]>
 *	Vinayak Holikatti <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

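/*
 * Illustrative sketch only, not part of the original source: because the
 * macro above is a statement expression, it evaluates to the return value of
 * whichever helper it invokes. A hypothetical caller (the "info->vcc" name is
 * an assumption for illustration) might use it as:
 *
 *	ret = ufshcd_toggle_vreg(hba->dev, info->vcc, true);
 */
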
#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u8 *regs;

	regs = kzalloc(len, GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	memcpy_fromio(regs, hba->mmio_base + offset, len);
	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

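/*
 * Illustrative sketch only, not part of the original source: a caller holding
 * a valid hba could dump a register window with the exported helper, for
 * example the whole standard UFSHCI register space:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */
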
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

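/*
 * Illustrative example only, not in the original source: with the table
 * above, a device state of UFS_SLEEP_PWR_MODE combined with a link state of
 * UIC_LINK_HIBERN8_STATE matches the fourth entry, i.e. (assuming the
 * ufs_pm_level enum counts up from UFS_PM_LVL_0) UFS_PM_LVL_3:
 *
 *	lvl = ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *						UIC_LINK_HIBERN8_STATE);
 */
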
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	int off = (int)tag - hba->nutrs;
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			&descp->input_param1);
}

static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace UPIU W/O tracing command */
		if (lrbp->cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (lrbp->cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				  lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
	int i;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
	}
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

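/*
 * Illustrative sketch only, not part of the original source: a typical use of
 * the helper above is polling a doorbell bit until it clears, e.g. waiting up
 * to 1 s in 1000 us steps for a (hypothetical) transfer request slot "tag":
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 1000, 1000, true);
 */
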
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if a free slot is not available, else returns 1 with the tag
 * value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos),
				REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

733 | ||
12b4fdb4 SJ |
734 | /** |
735 | * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command | |
736 | * @hba: Pointer to adapter instance | |
737 | * | |
738 | * This function gets UIC command argument3 | |
739 | * Returns 0 on success, non zero value on error | |
740 | */ | |
741 | static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) | |
742 | { | |
743 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); | |
744 | } | |
745 | ||
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

783 | /** |
784 | * ufshcd_is_exception_event - Check if the device raised an exception event | |
785 | * @ucd_rsp_ptr: pointer to response UPIU | |
786 | * | |
787 | * The function checks if the device raised an exception event indicated in | |
788 | * the Device Information field of response UPIU. | |
789 | * | |
790 | * Returns true if exception is raised, false otherwise. | |
791 | */ | |
792 | static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) | |
793 | { | |
794 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & | |
795 | MASK_RSP_EXCEPTION_EVENT ? true : false; | |
796 | } | |
797 | ||
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

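/*
 * Illustrative sketch only, not part of the original source: interrupt
 * aggregation is typically programmed with the default 40 us-unit timeout
 * defined above and a counter threshold of one less than the number of
 * transfer request slots, e.g.:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */
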
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *			When the run-stop registers are set to 1, they indicate
 *			to the host controller that it can process the requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);

	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}


static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	devfreq = devfreq_add_device(hba->dev,
			&ufs_devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

a3cd5ec5 SJ |
1455 | static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba) |
1456 | { | |
1457 | hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; | |
1458 | hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; | |
1459 | sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); | |
1460 | hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; | |
1461 | hba->clk_scaling.enable_attr.attr.mode = 0644; | |
1462 | if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) | |
1463 | dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); | |
1464 | } | |
1465 | ||
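/*
 * Illustrative usage, not part of the driver: the clkscale_enable
 * attribute created above lives under the controller device
 * (hba->dev) in sysfs, so clock scaling can be toggled from user
 * space with something like (path is an example and varies by
 * platform):
 *
 *	# echo 0 > /sys/devices/platform/.../ufshc/clkscale_enable
 *	# cat /sys/devices/platform/.../ufshc/clkscale_enable
 */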
1ab27c9c ST |
1466 | static void ufshcd_ungate_work(struct work_struct *work) |
1467 | { | |
1468 | int ret; | |
1469 | unsigned long flags; | |
1470 | struct ufs_hba *hba = container_of(work, struct ufs_hba, | |
1471 | clk_gating.ungate_work); | |
1472 | ||
1473 | cancel_delayed_work_sync(&hba->clk_gating.gate_work); | |
1474 | ||
1475 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1476 | if (hba->clk_gating.state == CLKS_ON) { | |
1477 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1478 | goto unblock_reqs; | |
1479 | } | |
1480 | ||
1481 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1482 | ufshcd_setup_clocks(hba, true); | |
1483 | ||
1484 | /* Exit from hibern8 */ | |
1485 | if (ufshcd_can_hibern8_during_gating(hba)) { | |
1486 | /* Prevent gating in this path */ | |
1487 | hba->clk_gating.is_suspended = true; | |
1488 | if (ufshcd_is_link_hibern8(hba)) { | |
1489 | ret = ufshcd_uic_hibern8_exit(hba); | |
1490 | if (ret) | |
1491 | dev_err(hba->dev, "%s: hibern8 exit failed %d\n", | |
1492 | __func__, ret); | |
1493 | else | |
1494 | ufshcd_set_link_active(hba); | |
1495 | } | |
1496 | hba->clk_gating.is_suspended = false; | |
1497 | } | |
1498 | unblock_reqs: | |
38135535 | 1499 | ufshcd_scsi_unblock_requests(hba); |
1ab27c9c ST |
1500 | } |
1501 | ||
1502 | /** | |
1503 | * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. | |
1504 | * Also, exit from hibern8 mode and set the link as active. | |
1505 | * @hba: per adapter instance | |
1506 | * @async: This indicates whether caller should ungate clocks asynchronously. | |
1507 | */ | |
1508 | int ufshcd_hold(struct ufs_hba *hba, bool async) | |
1509 | { | |
1510 | int rc = 0; | |
1511 | unsigned long flags; | |
1512 | ||
1513 | if (!ufshcd_is_clkgating_allowed(hba)) | |
1514 | goto out; | |
1ab27c9c ST |
1515 | spin_lock_irqsave(hba->host->host_lock, flags); |
1516 | hba->clk_gating.active_reqs++; | |
1517 | ||
53c12d0e YG |
1518 | if (ufshcd_eh_in_progress(hba)) { |
1519 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1520 | return 0; | |
1521 | } | |
1522 | ||
856b3483 | 1523 | start: |
1ab27c9c ST |
1524 | switch (hba->clk_gating.state) { |
1525 | case CLKS_ON: | |
f2a785ac VG |
1526 | /* |
1527 | * Wait for the ungate work to complete if in progress. | |
1528 | * Though the clocks may be in ON state, the link could | |
 1529 | * still be in hibern8 state if hibern8 is allowed | |
1530 | * during clock gating. | |
1531 | * Make sure we exit hibern8 state also in addition to | |
1532 | * clocks being ON. | |
1533 | */ | |
1534 | if (ufshcd_can_hibern8_during_gating(hba) && | |
1535 | ufshcd_is_link_hibern8(hba)) { | |
1536 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1537 | flush_work(&hba->clk_gating.ungate_work); | |
1538 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1539 | goto start; | |
1540 | } | |
1ab27c9c ST |
1541 | break; |
1542 | case REQ_CLKS_OFF: | |
1543 | if (cancel_delayed_work(&hba->clk_gating.gate_work)) { | |
1544 | hba->clk_gating.state = CLKS_ON; | |
7ff5ab47 SJ |
1545 | trace_ufshcd_clk_gating(dev_name(hba->dev), |
1546 | hba->clk_gating.state); | |
1ab27c9c ST |
1547 | break; |
1548 | } | |
1549 | /* | |
9c490d2d | 1550 | * If we are here, it means gating work is either done or |
1ab27c9c ST |
1551 | * currently running. Hence, fall through to cancel gating |
1552 | * work and to enable clocks. | |
1553 | */ | |
1554 | case CLKS_OFF: | |
38135535 | 1555 | ufshcd_scsi_block_requests(hba); |
1ab27c9c | 1556 | hba->clk_gating.state = REQ_CLKS_ON; |
7ff5ab47 SJ |
1557 | trace_ufshcd_clk_gating(dev_name(hba->dev), |
1558 | hba->clk_gating.state); | |
10e5e375 VV |
1559 | queue_work(hba->clk_gating.clk_gating_workq, |
1560 | &hba->clk_gating.ungate_work); | |
1ab27c9c ST |
1561 | /* |
1562 | * fall through to check if we should wait for this | |
1563 | * work to be done or not. | |
1564 | */ | |
1565 | case REQ_CLKS_ON: | |
1566 | if (async) { | |
1567 | rc = -EAGAIN; | |
1568 | hba->clk_gating.active_reqs--; | |
1569 | break; | |
1570 | } | |
1571 | ||
1572 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1573 | flush_work(&hba->clk_gating.ungate_work); | |
1574 | /* Make sure state is CLKS_ON before returning */ | |
856b3483 | 1575 | spin_lock_irqsave(hba->host->host_lock, flags); |
1ab27c9c ST |
1576 | goto start; |
1577 | default: | |
1578 | dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", | |
1579 | __func__, hba->clk_gating.state); | |
1580 | break; | |
1581 | } | |
1582 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1583 | out: | |
1584 | return rc; | |
1585 | } | |
6e3fd44d | 1586 | EXPORT_SYMBOL_GPL(ufshcd_hold); |
1ab27c9c ST |
1587 | |
1588 | static void ufshcd_gate_work(struct work_struct *work) | |
1589 | { | |
1590 | struct ufs_hba *hba = container_of(work, struct ufs_hba, | |
1591 | clk_gating.gate_work.work); | |
1592 | unsigned long flags; | |
1593 | ||
1594 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3f0c06de VG |
1595 | /* |
1596 | * In case you are here to cancel this work the gating state | |
1597 | * would be marked as REQ_CLKS_ON. In this case save time by | |
1598 | * skipping the gating work and exit after changing the clock | |
1599 | * state to CLKS_ON. | |
1600 | */ | |
1601 | if (hba->clk_gating.is_suspended || | |
1602 | (hba->clk_gating.state == REQ_CLKS_ON)) { | |
1ab27c9c | 1603 | hba->clk_gating.state = CLKS_ON; |
7ff5ab47 SJ |
1604 | trace_ufshcd_clk_gating(dev_name(hba->dev), |
1605 | hba->clk_gating.state); | |
1ab27c9c ST |
1606 | goto rel_lock; |
1607 | } | |
1608 | ||
1609 | if (hba->clk_gating.active_reqs | |
1610 | || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL | |
1611 | || hba->lrb_in_use || hba->outstanding_tasks | |
1612 | || hba->active_uic_cmd || hba->uic_async_done) | |
1613 | goto rel_lock; | |
1614 | ||
1615 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1616 | ||
1617 | /* put the link into hibern8 mode before turning off clocks */ | |
1618 | if (ufshcd_can_hibern8_during_gating(hba)) { | |
1619 | if (ufshcd_uic_hibern8_enter(hba)) { | |
1620 | hba->clk_gating.state = CLKS_ON; | |
7ff5ab47 SJ |
1621 | trace_ufshcd_clk_gating(dev_name(hba->dev), |
1622 | hba->clk_gating.state); | |
1ab27c9c ST |
1623 | goto out; |
1624 | } | |
1625 | ufshcd_set_link_hibern8(hba); | |
1626 | } | |
1627 | ||
1628 | if (!ufshcd_is_link_active(hba)) | |
1629 | ufshcd_setup_clocks(hba, false); | |
1630 | else | |
1631 | /* If link is active, device ref_clk can't be switched off */ | |
1632 | __ufshcd_setup_clocks(hba, false, true); | |
1633 | ||
1634 | /* | |
1635 | * In case you are here to cancel this work the gating state | |
1636 | * would be marked as REQ_CLKS_ON. In this case keep the state | |
1637 | * as REQ_CLKS_ON which would anyway imply that clocks are off | |
 1638 | * and a request to turn them on is pending. By doing it this way, | |
 1639 | * we keep the state machine intact and this would ultimately | |
 1640 | * prevent the cancel work from running multiple times when there are | |
1641 | * new requests arriving before the current cancel work is done. | |
1642 | */ | |
1643 | spin_lock_irqsave(hba->host->host_lock, flags); | |
7ff5ab47 | 1644 | if (hba->clk_gating.state == REQ_CLKS_OFF) { |
1ab27c9c | 1645 | hba->clk_gating.state = CLKS_OFF; |
7ff5ab47 SJ |
1646 | trace_ufshcd_clk_gating(dev_name(hba->dev), |
1647 | hba->clk_gating.state); | |
1648 | } | |
1ab27c9c ST |
1649 | rel_lock: |
1650 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1651 | out: | |
1652 | return; | |
1653 | } | |
1654 | ||
1655 | /* host lock must be held before calling this variant */ | |
1656 | static void __ufshcd_release(struct ufs_hba *hba) | |
1657 | { | |
1658 | if (!ufshcd_is_clkgating_allowed(hba)) | |
1659 | return; | |
1660 | ||
1661 | hba->clk_gating.active_reqs--; | |
1662 | ||
1663 | if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended | |
1664 | || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL | |
1665 | || hba->lrb_in_use || hba->outstanding_tasks | |
53c12d0e YG |
1666 | || hba->active_uic_cmd || hba->uic_async_done |
1667 | || ufshcd_eh_in_progress(hba)) | |
1ab27c9c ST |
1668 | return; |
1669 | ||
1670 | hba->clk_gating.state = REQ_CLKS_OFF; | |
7ff5ab47 | 1671 | trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); |
f4bb7704 EG |
1672 | queue_delayed_work(hba->clk_gating.clk_gating_workq, |
1673 | &hba->clk_gating.gate_work, | |
1674 | msecs_to_jiffies(hba->clk_gating.delay_ms)); | |
1ab27c9c ST |
1675 | } |
1676 | ||
1677 | void ufshcd_release(struct ufs_hba *hba) | |
1678 | { | |
1679 | unsigned long flags; | |
1680 | ||
1681 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1682 | __ufshcd_release(hba); | |
1683 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1684 | } | |
6e3fd44d | 1685 | EXPORT_SYMBOL_GPL(ufshcd_release); |
1ab27c9c ST |
1686 | |
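/*
 * Illustrative sketch, not part of the driver: code that touches the
 * host outside of the normal request path is expected to bracket the
 * access with ufshcd_hold()/ufshcd_release() so that gate_work cannot
 * switch the clocks off underneath it, e.g.:
 *
 *	u32 val;
 *
 *	ufshcd_hold(hba, false);
 *	val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 *	ufshcd_release(hba);
 *
 * Calling ufshcd_hold() with async == true instead returns -EAGAIN
 * when an ungate would be needed, which ufshcd_queuecommand() below
 * maps to SCSI_MLQUEUE_HOST_BUSY.
 */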
1687 | static ssize_t ufshcd_clkgate_delay_show(struct device *dev, | |
1688 | struct device_attribute *attr, char *buf) | |
1689 | { | |
1690 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
1691 | ||
1692 | return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); | |
1693 | } | |
1694 | ||
1695 | static ssize_t ufshcd_clkgate_delay_store(struct device *dev, | |
1696 | struct device_attribute *attr, const char *buf, size_t count) | |
1697 | { | |
1698 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
1699 | unsigned long flags, value; | |
1700 | ||
1701 | if (kstrtoul(buf, 0, &value)) | |
1702 | return -EINVAL; | |
1703 | ||
1704 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1705 | hba->clk_gating.delay_ms = value; | |
1706 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1707 | return count; | |
1708 | } | |
1709 | ||
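/*
 * Illustrative usage, not part of the driver: the gating delay
 * defaults to 150 ms (set in ufshcd_init_clk_gating() below) and the
 * clkgate_delay_ms attribute created on hba->dev lets user space tune
 * it at run time, e.g. (example path):
 *
 *	# echo 50 > /sys/devices/platform/.../ufshc/clkgate_delay_ms
 */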
b427411a ST |
1710 | static ssize_t ufshcd_clkgate_enable_show(struct device *dev, |
1711 | struct device_attribute *attr, char *buf) | |
1712 | { | |
1713 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
1714 | ||
1715 | return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled); | |
1716 | } | |
1717 | ||
1718 | static ssize_t ufshcd_clkgate_enable_store(struct device *dev, | |
1719 | struct device_attribute *attr, const char *buf, size_t count) | |
1720 | { | |
1721 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
1722 | unsigned long flags; | |
1723 | u32 value; | |
1724 | ||
1725 | if (kstrtou32(buf, 0, &value)) | |
1726 | return -EINVAL; | |
1727 | ||
1728 | value = !!value; | |
1729 | if (value == hba->clk_gating.is_enabled) | |
1730 | goto out; | |
1731 | ||
1732 | if (value) { | |
1733 | ufshcd_release(hba); | |
1734 | } else { | |
1735 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1736 | hba->clk_gating.active_reqs++; | |
1737 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1738 | } | |
1739 | ||
1740 | hba->clk_gating.is_enabled = value; | |
1741 | out: | |
1742 | return count; | |
1743 | } | |
1744 | ||
eebcc196 VG |
1745 | static void ufshcd_init_clk_scaling(struct ufs_hba *hba) |
1746 | { | |
1747 | char wq_name[sizeof("ufs_clkscaling_00")]; | |
1748 | ||
1749 | if (!ufshcd_is_clkscaling_supported(hba)) | |
1750 | return; | |
1751 | ||
1752 | INIT_WORK(&hba->clk_scaling.suspend_work, | |
1753 | ufshcd_clk_scaling_suspend_work); | |
1754 | INIT_WORK(&hba->clk_scaling.resume_work, | |
1755 | ufshcd_clk_scaling_resume_work); | |
1756 | ||
1757 | snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", | |
1758 | hba->host->host_no); | |
1759 | hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); | |
1760 | ||
1761 | ufshcd_clkscaling_init_sysfs(hba); | |
1762 | } | |
1763 | ||
1764 | static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) | |
1765 | { | |
1766 | if (!ufshcd_is_clkscaling_supported(hba)) | |
1767 | return; | |
1768 | ||
1769 | destroy_workqueue(hba->clk_scaling.workq); | |
1770 | ufshcd_devfreq_remove(hba); | |
1771 | } | |
1772 | ||
1ab27c9c ST |
1773 | static void ufshcd_init_clk_gating(struct ufs_hba *hba) |
1774 | { | |
10e5e375 VV |
1775 | char wq_name[sizeof("ufs_clk_gating_00")]; |
1776 | ||
1ab27c9c ST |
1777 | if (!ufshcd_is_clkgating_allowed(hba)) |
1778 | return; | |
1779 | ||
1780 | hba->clk_gating.delay_ms = 150; | |
1781 | INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); | |
1782 | INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); | |
1783 | ||
10e5e375 VV |
1784 | snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d", |
1785 | hba->host->host_no); | |
1786 | hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, | |
1787 | WQ_MEM_RECLAIM); | |
1788 | ||
b427411a ST |
1789 | hba->clk_gating.is_enabled = true; |
1790 | ||
1ab27c9c ST |
1791 | hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; |
1792 | hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; | |
1793 | sysfs_attr_init(&hba->clk_gating.delay_attr.attr); | |
1794 | hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; | |
b427411a | 1795 | hba->clk_gating.delay_attr.attr.mode = 0644; |
1ab27c9c ST |
1796 | if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) |
1797 | dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); | |
b427411a ST |
1798 | |
1799 | hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; | |
1800 | hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; | |
1801 | sysfs_attr_init(&hba->clk_gating.enable_attr.attr); | |
1802 | hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; | |
1803 | hba->clk_gating.enable_attr.attr.mode = 0644; | |
1804 | if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) | |
1805 | dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); | |
1ab27c9c ST |
1806 | } |
1807 | ||
1808 | static void ufshcd_exit_clk_gating(struct ufs_hba *hba) | |
1809 | { | |
1810 | if (!ufshcd_is_clkgating_allowed(hba)) | |
1811 | return; | |
1812 | device_remove_file(hba->dev, &hba->clk_gating.delay_attr); | |
b427411a | 1813 | device_remove_file(hba->dev, &hba->clk_gating.enable_attr); |
97cd6805 AM |
1814 | cancel_work_sync(&hba->clk_gating.ungate_work); |
1815 | cancel_delayed_work_sync(&hba->clk_gating.gate_work); | |
10e5e375 | 1816 | destroy_workqueue(hba->clk_gating.clk_gating_workq); |
1ab27c9c ST |
1817 | } |
1818 | ||
856b3483 ST |
1819 | /* Must be called with host lock acquired */ |
1820 | static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) | |
1821 | { | |
401f1e44 SJ |
1822 | bool queue_resume_work = false; |
1823 | ||
fcb0c4b0 | 1824 | if (!ufshcd_is_clkscaling_supported(hba)) |
856b3483 ST |
1825 | return; |
1826 | ||
401f1e44 SJ |
1827 | if (!hba->clk_scaling.active_reqs++) |
1828 | queue_resume_work = true; | |
1829 | ||
1830 | if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress) | |
1831 | return; | |
1832 | ||
1833 | if (queue_resume_work) | |
1834 | queue_work(hba->clk_scaling.workq, | |
1835 | &hba->clk_scaling.resume_work); | |
1836 | ||
1837 | if (!hba->clk_scaling.window_start_t) { | |
1838 | hba->clk_scaling.window_start_t = jiffies; | |
1839 | hba->clk_scaling.tot_busy_t = 0; | |
1840 | hba->clk_scaling.is_busy_started = false; | |
1841 | } | |
1842 | ||
856b3483 ST |
1843 | if (!hba->clk_scaling.is_busy_started) { |
1844 | hba->clk_scaling.busy_start_t = ktime_get(); | |
1845 | hba->clk_scaling.is_busy_started = true; | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) | |
1850 | { | |
1851 | struct ufs_clk_scaling *scaling = &hba->clk_scaling; | |
1852 | ||
fcb0c4b0 | 1853 | if (!ufshcd_is_clkscaling_supported(hba)) |
856b3483 ST |
1854 | return; |
1855 | ||
1856 | if (!hba->outstanding_reqs && scaling->is_busy_started) { | |
1857 | scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), | |
1858 | scaling->busy_start_t)); | |
8b0e1953 | 1859 | scaling->busy_start_t = 0; |
856b3483 ST |
1860 | scaling->is_busy_started = false; |
1861 | } | |
1862 | } | |
7a3e97b0 SY |
1863 | /** |
1864 | * ufshcd_send_command - Send SCSI or device management commands | |
1865 | * @hba: per adapter instance | |
1866 | * @task_tag: Task tag of the command | |
1867 | */ | |
1868 | static inline | |
1869 | void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) | |
1870 | { | |
ff8e20c6 | 1871 | hba->lrb[task_tag].issue_time_stamp = ktime_get(); |
09017188 | 1872 | hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); |
856b3483 | 1873 | ufshcd_clk_scaling_start_busy(hba); |
7a3e97b0 | 1874 | __set_bit(task_tag, &hba->outstanding_reqs); |
b873a275 | 1875 | ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
ad1a1b9c GB |
1876 | /* Make sure that doorbell is committed immediately */ |
1877 | wmb(); | |
1a07f2d9 | 1878 | ufshcd_add_command_trace(hba, task_tag, "send"); |
7a3e97b0 SY |
1879 | } |
1880 | ||
1881 | /** | |
1882 | * ufshcd_copy_sense_data - Copy sense data in case of check condition | |
8aa29f19 | 1883 | * @lrbp: pointer to local reference block |
7a3e97b0 SY |
1884 | */ |
1885 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | |
1886 | { | |
1887 | int len; | |
1c2623c5 SJ |
1888 | if (lrbp->sense_buffer && |
1889 | ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { | |
e3ce73d6 YG |
1890 | int len_to_copy; |
1891 | ||
5a0b0cb9 | 1892 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); |
e3ce73d6 YG |
1893 | len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len); |
1894 | ||
7a3e97b0 | 1895 | memcpy(lrbp->sense_buffer, |
5a0b0cb9 | 1896 | lrbp->ucd_rsp_ptr->sr.sense_data, |
dcea0bfb | 1897 | min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE)); |
7a3e97b0 SY |
1898 | } |
1899 | } | |
1900 | ||
68078d5c DR |
1901 | /** |
1902 | * ufshcd_copy_query_response() - Copy the Query Response and the data | |
1903 | * descriptor | |
1904 | * @hba: per adapter instance | |
8aa29f19 | 1905 | * @lrbp: pointer to local reference block |
68078d5c DR |
1906 | */ |
1907 | static | |
c6d4a831 | 1908 | int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
68078d5c DR |
1909 | { |
1910 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | |
1911 | ||
68078d5c | 1912 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); |
68078d5c | 1913 | |
68078d5c DR |
1914 | /* Get the descriptor */ |
1915 | if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { | |
d44a5f98 | 1916 | u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + |
68078d5c | 1917 | GENERAL_UPIU_REQUEST_SIZE; |
c6d4a831 DR |
1918 | u16 resp_len; |
1919 | u16 buf_len; | |
68078d5c DR |
1920 | |
1921 | /* data segment length */ | |
c6d4a831 | 1922 | resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & |
68078d5c | 1923 | MASK_QUERY_DATA_SEG_LEN; |
ea2aab24 SRT |
1924 | buf_len = be16_to_cpu( |
1925 | hba->dev_cmd.query.request.upiu_req.length); | |
c6d4a831 DR |
1926 | if (likely(buf_len >= resp_len)) { |
1927 | memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); | |
1928 | } else { | |
1929 | dev_warn(hba->dev, | |
1930 | "%s: Response size is bigger than buffer", | |
1931 | __func__); | |
1932 | return -EINVAL; | |
1933 | } | |
68078d5c | 1934 | } |
c6d4a831 DR |
1935 | |
1936 | return 0; | |
68078d5c DR |
1937 | } |
1938 | ||
7a3e97b0 SY |
1939 | /** |
1940 | * ufshcd_hba_capabilities - Read controller capabilities | |
1941 | * @hba: per adapter instance | |
1942 | */ | |
1943 | static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) | |
1944 | { | |
b873a275 | 1945 | hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); |
7a3e97b0 SY |
1946 | |
1947 | /* nutrs and nutmrs are 0 based values */ | |
1948 | hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; | |
1949 | hba->nutmrs = | |
1950 | ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; | |
1951 | } | |
1952 | ||
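/*
 * Worked example (register value is illustrative): a controller
 * reporting CAP = 0x0707001F has a transfer request slot field of
 * 0x1F and a task management slot field of 0x7; since both fields
 * are 0 based, this decodes to nutrs = 32 and nutmrs = 8.
 */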
1953 | /** | |
6ccf44fe SJ |
1954 | * ufshcd_ready_for_uic_cmd - Check if controller is ready |
1955 | * to accept UIC commands | |
7a3e97b0 | 1956 | * @hba: per adapter instance |
6ccf44fe SJ |
1957 | * Return true on success, else false |
1958 | */ | |
1959 | static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) | |
1960 | { | |
1961 | if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) | |
1962 | return true; | |
1963 | else | |
1964 | return false; | |
1965 | } | |
1966 | ||
53b3d9c3 SJ |
1967 | /** |
1968 | * ufshcd_get_upmcrs - Get the power mode change request status | |
1969 | * @hba: Pointer to adapter instance | |
1970 | * | |
1971 | * This function gets the UPMCRS field of HCS register | |
1972 | * Returns value of UPMCRS field | |
1973 | */ | |
1974 | static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) | |
1975 | { | |
1976 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; | |
1977 | } | |
1978 | ||
6ccf44fe SJ |
1979 | /** |
1980 | * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers | |
1981 | * @hba: per adapter instance | |
1982 | * @uic_cmd: UIC command | |
1983 | * | |
1984 | * Mutex must be held. | |
7a3e97b0 SY |
1985 | */ |
1986 | static inline void | |
6ccf44fe | 1987 | ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
7a3e97b0 | 1988 | { |
6ccf44fe SJ |
1989 | WARN_ON(hba->active_uic_cmd); |
1990 | ||
1991 | hba->active_uic_cmd = uic_cmd; | |
1992 | ||
7a3e97b0 | 1993 | /* Write Args */ |
6ccf44fe SJ |
1994 | ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); |
1995 | ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); | |
1996 | ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); | |
7a3e97b0 SY |
1997 | |
1998 | /* Write UIC Cmd */ | |
6ccf44fe | 1999 | ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, |
b873a275 | 2000 | REG_UIC_COMMAND); |
7a3e97b0 SY |
2001 | } |
2002 | ||
6ccf44fe SJ |
2003 | /** |
 2004 | * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command | |
2005 | * @hba: per adapter instance | |
8aa29f19 | 2006 | * @uic_cmd: UIC command |
6ccf44fe SJ |
2007 | * |
2008 | * Must be called with mutex held. | |
2009 | * Returns 0 only if success. | |
2010 | */ | |
2011 | static int | |
2012 | ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | |
2013 | { | |
2014 | int ret; | |
2015 | unsigned long flags; | |
2016 | ||
2017 | if (wait_for_completion_timeout(&uic_cmd->done, | |
2018 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) | |
2019 | ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; | |
2020 | else | |
2021 | ret = -ETIMEDOUT; | |
2022 | ||
2023 | spin_lock_irqsave(hba->host->host_lock, flags); | |
2024 | hba->active_uic_cmd = NULL; | |
2025 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
2026 | ||
2027 | return ret; | |
2028 | } | |
2029 | ||
2030 | /** | |
2031 | * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result | |
2032 | * @hba: per adapter instance | |
2033 | * @uic_cmd: UIC command | |
d75f7fe4 | 2034 | * @completion: initialize the completion only if this is set to true |
6ccf44fe SJ |
2035 | * |
 2036 | * Identical to ufshcd_send_uic_cmd() except for the locking: it must be called | |
57d104c1 | 2037 | * with mutex held and host_lock locked. |
6ccf44fe SJ |
2038 | * Returns 0 only if success. |
2039 | */ | |
2040 | static int | |
d75f7fe4 YG |
2041 | __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, |
2042 | bool completion) | |
6ccf44fe | 2043 | { |
6ccf44fe SJ |
2044 | if (!ufshcd_ready_for_uic_cmd(hba)) { |
2045 | dev_err(hba->dev, | |
2046 | "Controller not ready to accept UIC commands\n"); | |
2047 | return -EIO; | |
2048 | } | |
2049 | ||
d75f7fe4 YG |
2050 | if (completion) |
2051 | init_completion(&uic_cmd->done); | |
6ccf44fe | 2052 | |
6ccf44fe | 2053 | ufshcd_dispatch_uic_cmd(hba, uic_cmd); |
6ccf44fe | 2054 | |
57d104c1 | 2055 | return 0; |
6ccf44fe SJ |
2056 | } |
2057 | ||
2058 | /** | |
2059 | * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result | |
2060 | * @hba: per adapter instance | |
2061 | * @uic_cmd: UIC command | |
2062 | * | |
2063 | * Returns 0 only if success. | |
2064 | */ | |
e77044c5 | 2065 | int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
6ccf44fe SJ |
2066 | { |
2067 | int ret; | |
57d104c1 | 2068 | unsigned long flags; |
6ccf44fe | 2069 | |
1ab27c9c | 2070 | ufshcd_hold(hba, false); |
6ccf44fe | 2071 | mutex_lock(&hba->uic_cmd_mutex); |
cad2e03d YG |
2072 | ufshcd_add_delay_before_dme_cmd(hba); |
2073 | ||
57d104c1 | 2074 | spin_lock_irqsave(hba->host->host_lock, flags); |
d75f7fe4 | 2075 | ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); |
57d104c1 SJ |
2076 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
2077 | if (!ret) | |
2078 | ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); | |
2079 | ||
6ccf44fe SJ |
2080 | mutex_unlock(&hba->uic_cmd_mutex); |
2081 | ||
1ab27c9c | 2082 | ufshcd_release(hba); |
6ccf44fe SJ |
2083 | return ret; |
2084 | } | |
2085 | ||
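/*
 * Illustrative sketch (assumes the uic_command layout from ufshcd.h
 * and the DME macros from ufshci.h/unipro.h): a DME_GET is issued by
 * filling a uic_command and handing it to ufshcd_send_uic_cmd(),
 * which serializes it on uic_cmd_mutex as shown above:
 *
 *	struct uic_command uic_cmd = {0};
 *	u32 mib_val;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(PA_CONNECTEDTXDATALANES);
 *	if (!ufshcd_send_uic_cmd(hba, &uic_cmd))
 *		mib_val = uic_cmd.argument3;
 */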
7a3e97b0 SY |
2086 | /** |
2087 | * ufshcd_map_sg - Map scatter-gather list to prdt | |
8aa29f19 BVA |
2088 | * @hba: per adapter instance |
2089 | * @lrbp: pointer to local reference block | |
7a3e97b0 SY |
2090 | * |
2091 | * Returns 0 in case of success, non-zero value in case of failure | |
2092 | */ | |
75b1cc4a | 2093 | static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
7a3e97b0 SY |
2094 | { |
2095 | struct ufshcd_sg_entry *prd_table; | |
2096 | struct scatterlist *sg; | |
2097 | struct scsi_cmnd *cmd; | |
2098 | int sg_segments; | |
2099 | int i; | |
2100 | ||
2101 | cmd = lrbp->cmd; | |
2102 | sg_segments = scsi_dma_map(cmd); | |
2103 | if (sg_segments < 0) | |
2104 | return sg_segments; | |
2105 | ||
2106 | if (sg_segments) { | |
75b1cc4a KK |
2107 | if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) |
2108 | lrbp->utr_descriptor_ptr->prd_table_length = | |
2109 | cpu_to_le16((u16)(sg_segments * | |
2110 | sizeof(struct ufshcd_sg_entry))); | |
2111 | else | |
2112 | lrbp->utr_descriptor_ptr->prd_table_length = | |
2113 | cpu_to_le16((u16) (sg_segments)); | |
7a3e97b0 SY |
2114 | |
2115 | prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; | |
2116 | ||
2117 | scsi_for_each_sg(cmd, sg, sg_segments, i) { | |
2118 | prd_table[i].size = | |
2119 | cpu_to_le32(((u32) sg_dma_len(sg))-1); | |
2120 | prd_table[i].base_addr = | |
2121 | cpu_to_le32(lower_32_bits(sg->dma_address)); | |
2122 | prd_table[i].upper_addr = | |
2123 | cpu_to_le32(upper_32_bits(sg->dma_address)); | |
52ac95fe | 2124 | prd_table[i].reserved = 0; |
7a3e97b0 SY |
2125 | } |
2126 | } else { | |
2127 | lrbp->utr_descriptor_ptr->prd_table_length = 0; | |
2128 | } | |
2129 | ||
2130 | return 0; | |
2131 | } | |
2132 | ||
2133 | /** | |
2fbd009b | 2134 | * ufshcd_enable_intr - enable interrupts |
7a3e97b0 | 2135 | * @hba: per adapter instance |
2fbd009b | 2136 | * @intrs: interrupt bits |
7a3e97b0 | 2137 | */ |
2fbd009b | 2138 | static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) |
7a3e97b0 | 2139 | { |
2fbd009b SJ |
2140 | u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); |
2141 | ||
2142 | if (hba->ufs_version == UFSHCI_VERSION_10) { | |
2143 | u32 rw; | |
2144 | rw = set & INTERRUPT_MASK_RW_VER_10; | |
2145 | set = rw | ((set ^ intrs) & intrs); | |
2146 | } else { | |
2147 | set |= intrs; | |
2148 | } | |
2149 | ||
2150 | ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); | |
2151 | } | |
2152 | ||
2153 | /** | |
2154 | * ufshcd_disable_intr - disable interrupts | |
2155 | * @hba: per adapter instance | |
2156 | * @intrs: interrupt bits | |
2157 | */ | |
2158 | static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) | |
2159 | { | |
2160 | u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); | |
2161 | ||
2162 | if (hba->ufs_version == UFSHCI_VERSION_10) { | |
2163 | u32 rw; | |
2164 | rw = (set & INTERRUPT_MASK_RW_VER_10) & | |
2165 | ~(intrs & INTERRUPT_MASK_RW_VER_10); | |
2166 | set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10); | |
2167 | ||
2168 | } else { | |
2169 | set &= ~intrs; | |
7a3e97b0 | 2170 | } |
2fbd009b SJ |
2171 | |
2172 | ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); | |
7a3e97b0 SY |
2173 | } |
2174 | ||
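/*
 * Illustrative usage (this is how the driver itself enables its
 * working interrupt set later, in ufshcd_make_hba_operational()):
 *
 *	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 */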
5a0b0cb9 SRT |
2175 | /** |
 2176 | * ufshcd_prepare_req_desc_hdr() - Fills the request's header | |
2177 | * descriptor according to request | |
2178 | * @lrbp: pointer to local reference block | |
2179 | * @upiu_flags: flags required in the header | |
 2180 | * @cmd_dir: request's data direction | |
2181 | */ | |
2182 | static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, | |
300bb13f | 2183 | u32 *upiu_flags, enum dma_data_direction cmd_dir) |
5a0b0cb9 SRT |
2184 | { |
2185 | struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; | |
2186 | u32 data_direction; | |
2187 | u32 dword_0; | |
2188 | ||
2189 | if (cmd_dir == DMA_FROM_DEVICE) { | |
2190 | data_direction = UTP_DEVICE_TO_HOST; | |
2191 | *upiu_flags = UPIU_CMD_FLAGS_READ; | |
2192 | } else if (cmd_dir == DMA_TO_DEVICE) { | |
2193 | data_direction = UTP_HOST_TO_DEVICE; | |
2194 | *upiu_flags = UPIU_CMD_FLAGS_WRITE; | |
2195 | } else { | |
2196 | data_direction = UTP_NO_DATA_TRANSFER; | |
2197 | *upiu_flags = UPIU_CMD_FLAGS_NONE; | |
2198 | } | |
2199 | ||
2200 | dword_0 = data_direction | (lrbp->command_type | |
2201 | << UPIU_COMMAND_TYPE_OFFSET); | |
2202 | if (lrbp->intr_cmd) | |
2203 | dword_0 |= UTP_REQ_DESC_INT_CMD; | |
2204 | ||
2205 | /* Transfer request descriptor header fields */ | |
2206 | req_desc->header.dword_0 = cpu_to_le32(dword_0); | |
52ac95fe YG |
2207 | /* dword_1 is reserved, hence it is set to 0 */ |
2208 | req_desc->header.dword_1 = 0; | |
5a0b0cb9 SRT |
2209 | /* |
2210 | * assigning invalid value for command status. Controller | |
2211 | * updates OCS on command completion, with the command | |
2212 | * status | |
2213 | */ | |
2214 | req_desc->header.dword_2 = | |
2215 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
52ac95fe YG |
2216 | /* dword_3 is reserved, hence it is set to 0 */ |
2217 | req_desc->header.dword_3 = 0; | |
51047266 YG |
2218 | |
2219 | req_desc->prd_table_length = 0; | |
5a0b0cb9 SRT |
2220 | } |
2221 | ||
2222 | /** | |
2223 | * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, | |
2224 | * for scsi commands | |
8aa29f19 BVA |
2225 | * @lrbp: local reference block pointer |
2226 | * @upiu_flags: flags | |
5a0b0cb9 SRT |
2227 | */ |
2228 | static | |
2229 | void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) | |
2230 | { | |
2231 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | |
52ac95fe | 2232 | unsigned short cdb_len; |
5a0b0cb9 SRT |
2233 | |
2234 | /* command descriptor fields */ | |
2235 | ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( | |
2236 | UPIU_TRANSACTION_COMMAND, upiu_flags, | |
2237 | lrbp->lun, lrbp->task_tag); | |
2238 | ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( | |
2239 | UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); | |
2240 | ||
2241 | /* Total EHS length and Data segment length will be zero */ | |
2242 | ucd_req_ptr->header.dword_2 = 0; | |
2243 | ||
2244 | ucd_req_ptr->sc.exp_data_transfer_len = | |
2245 | cpu_to_be32(lrbp->cmd->sdb.length); | |
2246 | ||
a851b2bd AA |
2247 | cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE); |
2248 | memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); | |
52ac95fe YG |
2249 | memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); |
2250 | ||
2251 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); | |
5a0b0cb9 SRT |
2252 | } |
2253 | ||
68078d5c DR |
2254 | /** |
2255 | * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc, | |
 2256 | * for query requests | |
2257 | * @hba: UFS hba | |
2258 | * @lrbp: local reference block pointer | |
2259 | * @upiu_flags: flags | |
2260 | */ | |
2261 | static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, | |
2262 | struct ufshcd_lrb *lrbp, u32 upiu_flags) | |
2263 | { | |
2264 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | |
2265 | struct ufs_query *query = &hba->dev_cmd.query; | |
e8c8e82a | 2266 | u16 len = be16_to_cpu(query->request.upiu_req.length); |
68078d5c DR |
2267 | |
2268 | /* Query request header */ | |
2269 | ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( | |
2270 | UPIU_TRANSACTION_QUERY_REQ, upiu_flags, | |
2271 | lrbp->lun, lrbp->task_tag); | |
2272 | ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( | |
2273 | 0, query->request.query_func, 0, 0); | |
2274 | ||
6861285c ZL |
 2275 | /* Data segment length is only needed for WRITE_DESC */ | |
2276 | if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) | |
2277 | ucd_req_ptr->header.dword_2 = | |
2278 | UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len); | |
2279 | else | |
2280 | ucd_req_ptr->header.dword_2 = 0; | |
68078d5c DR |
2281 | |
2282 | /* Copy the Query Request buffer as is */ | |
2283 | memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, | |
2284 | QUERY_OSF_SIZE); | |
68078d5c DR |
2285 | |
2286 | /* Copy the Descriptor */ | |
c6d4a831 | 2287 | if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) |
220d17a6 | 2288 | memcpy(ucd_req_ptr + 1, query->descriptor, len); |
c6d4a831 | 2289 | |
51047266 | 2290 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); |
68078d5c DR |
2291 | } |
2292 | ||
5a0b0cb9 SRT |
2293 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) |
2294 | { | |
2295 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | |
2296 | ||
2297 | memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); | |
2298 | ||
2299 | /* command descriptor fields */ | |
2300 | ucd_req_ptr->header.dword_0 = | |
2301 | UPIU_HEADER_DWORD( | |
2302 | UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); | |
51047266 YG |
2303 | /* clear rest of the fields of basic header */ |
2304 | ucd_req_ptr->header.dword_1 = 0; | |
2305 | ucd_req_ptr->header.dword_2 = 0; | |
2306 | ||
2307 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); | |
5a0b0cb9 SRT |
2308 | } |
2309 | ||
7a3e97b0 | 2310 | /** |
300bb13f JP |
2311 | * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU) |
2312 | * for Device Management Purposes | |
8aa29f19 BVA |
2313 | * @hba: per adapter instance |
2314 | * @lrbp: pointer to local reference block | |
7a3e97b0 | 2315 | */ |
300bb13f | 2316 | static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
7a3e97b0 | 2317 | { |
7a3e97b0 | 2318 | u32 upiu_flags; |
5a0b0cb9 | 2319 | int ret = 0; |
7a3e97b0 | 2320 | |
83dc7e3d | 2321 | if ((hba->ufs_version == UFSHCI_VERSION_10) || |
2322 | (hba->ufs_version == UFSHCI_VERSION_11)) | |
300bb13f | 2323 | lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; |
83dc7e3d | 2324 | else |
2325 | lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; | |
300bb13f JP |
2326 | |
2327 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); | |
2328 | if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) | |
2329 | ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); | |
2330 | else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) | |
2331 | ufshcd_prepare_utp_nop_upiu(lrbp); | |
2332 | else | |
2333 | ret = -EINVAL; | |
2334 | ||
2335 | return ret; | |
2336 | } | |
2337 | ||
2338 | /** | |
2339 | * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU) | |
2340 | * for SCSI Purposes | |
8aa29f19 BVA |
2341 | * @hba: per adapter instance |
2342 | * @lrbp: pointer to local reference block | |
300bb13f JP |
2343 | */ |
2344 | static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
2345 | { | |
2346 | u32 upiu_flags; | |
2347 | int ret = 0; | |
2348 | ||
83dc7e3d | 2349 | if ((hba->ufs_version == UFSHCI_VERSION_10) || |
2350 | (hba->ufs_version == UFSHCI_VERSION_11)) | |
300bb13f | 2351 | lrbp->command_type = UTP_CMD_TYPE_SCSI; |
83dc7e3d | 2352 | else |
2353 | lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; | |
300bb13f JP |
2354 | |
2355 | if (likely(lrbp->cmd)) { | |
2356 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, | |
2357 | lrbp->cmd->sc_data_direction); | |
2358 | ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); | |
2359 | } else { | |
2360 | ret = -EINVAL; | |
2361 | } | |
5a0b0cb9 SRT |
2362 | |
2363 | return ret; | |
7a3e97b0 SY |
2364 | } |
2365 | ||
2a8fa600 SJ |
2366 | /** |
2367 | * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID | |
8aa29f19 | 2368 | * @upiu_wlun_id: UPIU W-LUN id |
2a8fa600 SJ |
2369 | * |
2370 | * Returns SCSI W-LUN id | |
2371 | */ | |
2372 | static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) | |
2373 | { | |
2374 | return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; | |
2375 | } | |
2376 | ||
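/*
 * Worked example (constant values quoted from ufs.h/scsi.h as an
 * assumption for illustration): the UFS device well known LU is
 * reported in the UPIU as 0xD0, so the mapping above yields
 * (0xD0 & ~0x80) | 0xC100 = 0xC150, i.e. the device W-LU shows up
 * as SCSI W-LUN 49488.
 */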
7a3e97b0 SY |
2377 | /** |
2378 | * ufshcd_queuecommand - main entry point for SCSI requests | |
8aa29f19 | 2379 | * @host: SCSI host pointer |
7a3e97b0 | 2380 | * @cmd: command from SCSI Midlayer |
7a3e97b0 SY |
2381 | * |
2382 | * Returns 0 for success, non-zero in case of failure | |
2383 | */ | |
2384 | static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |
2385 | { | |
2386 | struct ufshcd_lrb *lrbp; | |
2387 | struct ufs_hba *hba; | |
2388 | unsigned long flags; | |
2389 | int tag; | |
2390 | int err = 0; | |
2391 | ||
2392 | hba = shost_priv(host); | |
2393 | ||
2394 | tag = cmd->request->tag; | |
14497328 YG |
2395 | if (!ufshcd_valid_tag(hba, tag)) { |
2396 | dev_err(hba->dev, | |
2397 | "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", | |
2398 | __func__, tag, cmd, cmd->request); | |
2399 | BUG(); | |
2400 | } | |
7a3e97b0 | 2401 | |
a3cd5ec5 SJ |
2402 | if (!down_read_trylock(&hba->clk_scaling_lock)) |
2403 | return SCSI_MLQUEUE_HOST_BUSY; | |
2404 | ||
3441da7d SRT |
2405 | spin_lock_irqsave(hba->host->host_lock, flags); |
2406 | switch (hba->ufshcd_state) { | |
2407 | case UFSHCD_STATE_OPERATIONAL: | |
2408 | break; | |
141f8165 | 2409 | case UFSHCD_STATE_EH_SCHEDULED: |
3441da7d | 2410 | case UFSHCD_STATE_RESET: |
7a3e97b0 | 2411 | err = SCSI_MLQUEUE_HOST_BUSY; |
3441da7d SRT |
2412 | goto out_unlock; |
2413 | case UFSHCD_STATE_ERROR: | |
2414 | set_host_byte(cmd, DID_ERROR); | |
2415 | cmd->scsi_done(cmd); | |
2416 | goto out_unlock; | |
2417 | default: | |
2418 | dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", | |
2419 | __func__, hba->ufshcd_state); | |
2420 | set_host_byte(cmd, DID_BAD_TARGET); | |
2421 | cmd->scsi_done(cmd); | |
2422 | goto out_unlock; | |
7a3e97b0 | 2423 | } |
53c12d0e YG |
2424 | |
2425 | /* if error handling is in progress, don't issue commands */ | |
2426 | if (ufshcd_eh_in_progress(hba)) { | |
2427 | set_host_byte(cmd, DID_ERROR); | |
2428 | cmd->scsi_done(cmd); | |
2429 | goto out_unlock; | |
2430 | } | |
3441da7d | 2431 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
7a3e97b0 | 2432 | |
7fabb77b GB |
2433 | hba->req_abort_count = 0; |
2434 | ||
5a0b0cb9 SRT |
2435 | /* acquire the tag to make sure device cmds don't use it */ |
2436 | if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { | |
2437 | /* | |
2438 | * Dev manage command in progress, requeue the command. | |
2439 | * Requeuing the command helps in cases where the request *may* | |
 2441 | * find a different tag instead of waiting for the dev manage command | |
2441 | * completion. | |
2442 | */ | |
2443 | err = SCSI_MLQUEUE_HOST_BUSY; | |
2444 | goto out; | |
2445 | } | |
2446 | ||
1ab27c9c ST |
2447 | err = ufshcd_hold(hba, true); |
2448 | if (err) { | |
2449 | err = SCSI_MLQUEUE_HOST_BUSY; | |
2450 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
2451 | goto out; | |
2452 | } | |
2453 | WARN_ON(hba->clk_gating.state != CLKS_ON); | |
2454 | ||
7a3e97b0 SY |
2455 | lrbp = &hba->lrb[tag]; |
2456 | ||
5a0b0cb9 | 2457 | WARN_ON(lrbp->cmd); |
7a3e97b0 | 2458 | lrbp->cmd = cmd; |
dcea0bfb | 2459 | lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE; |
7a3e97b0 SY |
2460 | lrbp->sense_buffer = cmd->sense_buffer; |
2461 | lrbp->task_tag = tag; | |
0ce147d4 | 2462 | lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); |
b852190e | 2463 | lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; |
e0b299e3 | 2464 | lrbp->req_abort_skip = false; |
7a3e97b0 | 2465 | |
300bb13f JP |
2466 | ufshcd_comp_scsi_upiu(hba, lrbp); |
2467 | ||
75b1cc4a | 2468 | err = ufshcd_map_sg(hba, lrbp); |
5a0b0cb9 SRT |
2469 | if (err) { |
2470 | lrbp->cmd = NULL; | |
2471 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
7a3e97b0 | 2472 | goto out; |
5a0b0cb9 | 2473 | } |
ad1a1b9c GB |
2474 | /* Make sure descriptors are ready before ringing the doorbell */ |
2475 | wmb(); | |
7a3e97b0 SY |
2476 | |
2477 | /* issue command to the controller */ | |
2478 | spin_lock_irqsave(hba->host->host_lock, flags); | |
0e675efa | 2479 | ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false)); |
7a3e97b0 | 2480 | ufshcd_send_command(hba, tag); |
3441da7d | 2481 | out_unlock: |
7a3e97b0 SY |
2482 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
2483 | out: | |
a3cd5ec5 | 2484 | up_read(&hba->clk_scaling_lock); |
7a3e97b0 SY |
2485 | return err; |
2486 | } | |
2487 | ||
5a0b0cb9 SRT |
2488 | static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, |
2489 | struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) | |
2490 | { | |
2491 | lrbp->cmd = NULL; | |
2492 | lrbp->sense_bufflen = 0; | |
2493 | lrbp->sense_buffer = NULL; | |
2494 | lrbp->task_tag = tag; | |
2495 | lrbp->lun = 0; /* device management cmd is not specific to any LUN */ | |
5a0b0cb9 SRT |
2496 | lrbp->intr_cmd = true; /* No interrupt aggregation */ |
2497 | hba->dev_cmd.type = cmd_type; | |
2498 | ||
300bb13f | 2499 | return ufshcd_comp_devman_upiu(hba, lrbp); |
5a0b0cb9 SRT |
2500 | } |
2501 | ||
2502 | static int | |
2503 | ufshcd_clear_cmd(struct ufs_hba *hba, int tag) | |
2504 | { | |
2505 | int err = 0; | |
2506 | unsigned long flags; | |
2507 | u32 mask = 1 << tag; | |
2508 | ||
2509 | /* clear outstanding transaction before retry */ | |
2510 | spin_lock_irqsave(hba->host->host_lock, flags); | |
2511 | ufshcd_utrl_clear(hba, tag); | |
2512 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
2513 | ||
2514 | /* | |
 2515 | * wait for h/w to clear the corresponding bit in the doorbell. | |
2516 | * max. wait is 1 sec. | |
2517 | */ | |
2518 | err = ufshcd_wait_for_register(hba, | |
2519 | REG_UTP_TRANSFER_REQ_DOOR_BELL, | |
596585a2 | 2520 | mask, ~mask, 1000, 1000, true); |
5a0b0cb9 SRT |
2521 | |
2522 | return err; | |
2523 | } | |
2524 | ||
c6d4a831 DR |
2525 | static int |
2526 | ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
2527 | { | |
2528 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | |
2529 | ||
2530 | /* Get the UPIU response */ | |
2531 | query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> | |
2532 | UPIU_RSP_CODE_OFFSET; | |
2533 | return query_res->response; | |
2534 | } | |
2535 | ||
5a0b0cb9 SRT |
2536 | /** |
2537 | * ufshcd_dev_cmd_completion() - handles device management command responses | |
2538 | * @hba: per adapter instance | |
2539 | * @lrbp: pointer to local reference block | |
2540 | */ | |
2541 | static int | |
2542 | ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
2543 | { | |
2544 | int resp; | |
2545 | int err = 0; | |
2546 | ||
ff8e20c6 | 2547 | hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); |
5a0b0cb9 SRT |
2548 | resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); |
2549 | ||
2550 | switch (resp) { | |
2551 | case UPIU_TRANSACTION_NOP_IN: | |
2552 | if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { | |
2553 | err = -EINVAL; | |
2554 | dev_err(hba->dev, "%s: unexpected response %x\n", | |
2555 | __func__, resp); | |
2556 | } | |
2557 | break; | |
68078d5c | 2558 | case UPIU_TRANSACTION_QUERY_RSP: |
c6d4a831 DR |
2559 | err = ufshcd_check_query_response(hba, lrbp); |
2560 | if (!err) | |
2561 | err = ufshcd_copy_query_response(hba, lrbp); | |
68078d5c | 2562 | break; |
5a0b0cb9 SRT |
2563 | case UPIU_TRANSACTION_REJECT_UPIU: |
2564 | /* TODO: handle Reject UPIU Response */ | |
2565 | err = -EPERM; | |
2566 | dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", | |
2567 | __func__); | |
2568 | break; | |
2569 | default: | |
2570 | err = -EINVAL; | |
2571 | dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", | |
2572 | __func__, resp); | |
2573 | break; | |
2574 | } | |
2575 | ||
2576 | return err; | |
2577 | } | |
2578 | ||
2579 | static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, | |
2580 | struct ufshcd_lrb *lrbp, int max_timeout) | |
2581 | { | |
2582 | int err = 0; | |
2583 | unsigned long time_left; | |
2584 | unsigned long flags; | |
2585 | ||
2586 | time_left = wait_for_completion_timeout(hba->dev_cmd.complete, | |
2587 | msecs_to_jiffies(max_timeout)); | |
2588 | ||
ad1a1b9c GB |
2589 | /* Make sure descriptors are ready before ringing the doorbell */ |
2590 | wmb(); | |
5a0b0cb9 SRT |
2591 | spin_lock_irqsave(hba->host->host_lock, flags); |
2592 | hba->dev_cmd.complete = NULL; | |
2593 | if (likely(time_left)) { | |
2594 | err = ufshcd_get_tr_ocs(lrbp); | |
2595 | if (!err) | |
2596 | err = ufshcd_dev_cmd_completion(hba, lrbp); | |
2597 | } | |
2598 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
2599 | ||
2600 | if (!time_left) { | |
2601 | err = -ETIMEDOUT; | |
a48353f6 YG |
 2602 | dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n", | |
2603 | __func__, lrbp->task_tag); | |
5a0b0cb9 | 2604 | if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) |
a48353f6 | 2605 | /* successfully cleared the command, retry if needed */ |
5a0b0cb9 | 2606 | err = -EAGAIN; |
a48353f6 YG |
2607 | /* |
2608 | * in case of an error, after clearing the doorbell, | |
2609 | * we also need to clear the outstanding_request | |
2610 | * field in hba | |
2611 | */ | |
2612 | ufshcd_outstanding_req_clear(hba, lrbp->task_tag); | |
5a0b0cb9 SRT |
2613 | } |
2614 | ||
2615 | return err; | |
2616 | } | |
2617 | ||
2618 | /** | |
2619 | * ufshcd_get_dev_cmd_tag - Get device management command tag | |
2620 | * @hba: per-adapter instance | |
8aa29f19 | 2621 | * @tag_out: pointer to variable with available slot value |
5a0b0cb9 SRT |
2622 | * |
2623 | * Get a free slot and lock it until device management command | |
2624 | * completes. | |
2625 | * | |
 2626 | * Returns false if a free slot is unavailable for locking, else | |
 2627 | * returns true with the tag value in @tag_out. | |
2628 | */ | |
2629 | static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) | |
2630 | { | |
2631 | int tag; | |
2632 | bool ret = false; | |
2633 | unsigned long tmp; | |
2634 | ||
2635 | if (!tag_out) | |
2636 | goto out; | |
2637 | ||
2638 | do { | |
2639 | tmp = ~hba->lrb_in_use; | |
2640 | tag = find_last_bit(&tmp, hba->nutrs); | |
2641 | if (tag >= hba->nutrs) | |
2642 | goto out; | |
2643 | } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); | |
2644 | ||
2645 | *tag_out = tag; | |
2646 | ret = true; | |
2647 | out: | |
2648 | return ret; | |
2649 | } | |
2650 | ||
2651 | static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) | |
2652 | { | |
2653 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
2654 | } | |
2655 | ||
2656 | /** | |
2657 | * ufshcd_exec_dev_cmd - API for sending device management requests | |
8aa29f19 BVA |
2658 | * @hba: UFS hba |
2659 | * @cmd_type: specifies the type (NOP, Query...) | |
 2660 | * @timeout: timeout in milliseconds | |
5a0b0cb9 | 2661 | * |
68078d5c DR |
2662 | * NOTE: Since there is only one available tag for device management commands, |
2663 | * it is expected you hold the hba->dev_cmd.lock mutex. | |
5a0b0cb9 SRT |
2664 | */ |
2665 | static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, | |
2666 | enum dev_cmd_type cmd_type, int timeout) | |
2667 | { | |
2668 | struct ufshcd_lrb *lrbp; | |
2669 | int err; | |
2670 | int tag; | |
2671 | struct completion wait; | |
2672 | unsigned long flags; | |
2673 | ||
a3cd5ec5 SJ |
2674 | down_read(&hba->clk_scaling_lock); |
2675 | ||
5a0b0cb9 SRT |
2676 | /* |
2677 | * Get free slot, sleep if slots are unavailable. | |
2678 | * Even though we use wait_event() which sleeps indefinitely, | |
2679 | * the maximum wait time is bounded by SCSI request timeout. | |
2680 | */ | |
2681 | wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); | |
2682 | ||
2683 | init_completion(&wait); | |
2684 | lrbp = &hba->lrb[tag]; | |
2685 | WARN_ON(lrbp->cmd); | |
2686 | err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); | |
2687 | if (unlikely(err)) | |
2688 | goto out_put_tag; | |
2689 | ||
2690 | hba->dev_cmd.complete = &wait; | |
2691 | ||
6667e6d9 | 2692 | ufshcd_add_query_upiu_trace(hba, tag, "query_send"); |
e3dfdc53 YG |
2693 | /* Make sure descriptors are ready before ringing the doorbell */ |
2694 | wmb(); | |
5a0b0cb9 | 2695 | spin_lock_irqsave(hba->host->host_lock, flags); |
0e675efa | 2696 | ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false)); |
5a0b0cb9 SRT |
2697 | ufshcd_send_command(hba, tag); |
2698 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
2699 | ||
2700 | err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); | |
2701 | ||
6667e6d9 OS |
2702 | ufshcd_add_query_upiu_trace(hba, tag, |
2703 | err ? "query_complete_err" : "query_complete"); | |
2704 | ||
5a0b0cb9 SRT |
2705 | out_put_tag: |
2706 | ufshcd_put_dev_cmd_tag(hba, tag); | |
2707 | wake_up(&hba->dev_cmd.tag_wq); | |
a3cd5ec5 | 2708 | up_read(&hba->clk_scaling_lock); |
5a0b0cb9 SRT |
2709 | return err; |
2710 | } | |
2711 | ||
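/*
 * Illustrative usage (mirrors how the driver verifies device
 * initialization with a NOP OUT): device management commands share a
 * single slot, so callers serialize on hba->dev_cmd.lock as the NOTE
 * above requires:
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */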
d44a5f98 DR |
2712 | /** |
2713 | * ufshcd_init_query() - init the query response and request parameters | |
2714 | * @hba: per-adapter instance | |
2715 | * @request: address of the request pointer to be initialized | |
2716 | * @response: address of the response pointer to be initialized | |
2717 | * @opcode: operation to perform | |
2718 | * @idn: flag idn to access | |
2719 | * @index: LU number to access | |
2720 | * @selector: query/flag/descriptor further identification | |
2721 | */ | |
2722 | static inline void ufshcd_init_query(struct ufs_hba *hba, | |
2723 | struct ufs_query_req **request, struct ufs_query_res **response, | |
2724 | enum query_opcode opcode, u8 idn, u8 index, u8 selector) | |
2725 | { | |
2726 | *request = &hba->dev_cmd.query.request; | |
2727 | *response = &hba->dev_cmd.query.response; | |
2728 | memset(*request, 0, sizeof(struct ufs_query_req)); | |
2729 | memset(*response, 0, sizeof(struct ufs_query_res)); | |
2730 | (*request)->upiu_req.opcode = opcode; | |
2731 | (*request)->upiu_req.idn = idn; | |
2732 | (*request)->upiu_req.index = index; | |
2733 | (*request)->upiu_req.selector = selector; | |
2734 | } | |
2735 | ||
dc3c8d3a YG |
2736 | static int ufshcd_query_flag_retry(struct ufs_hba *hba, |
2737 | enum query_opcode opcode, enum flag_idn idn, bool *flag_res) | |
2738 | { | |
2739 | int ret; | |
2740 | int retries; | |
2741 | ||
2742 | for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { | |
2743 | ret = ufshcd_query_flag(hba, opcode, idn, flag_res); | |
2744 | if (ret) | |
2745 | dev_dbg(hba->dev, | |
2746 | "%s: failed with error %d, retries %d\n", | |
2747 | __func__, ret, retries); | |
2748 | else | |
2749 | break; | |
2750 | } | |
2751 | ||
2752 | if (ret) | |
2753 | dev_err(hba->dev, | |
2754 | "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", | |
2755 | __func__, opcode, idn, ret, retries); | |
2756 | return ret; | |
2757 | } | |
2758 | ||
68078d5c DR |
2759 | /** |
2760 | * ufshcd_query_flag() - API function for sending flag query requests | |
8aa29f19 BVA |
2761 | * @hba: per-adapter instance |
2762 | * @opcode: flag query to perform | |
2763 | * @idn: flag idn to access | |
2764 | * @flag_res: the flag value after the query request completes | |
68078d5c DR |
2765 | * |
2766 | * Returns 0 for success, non-zero in case of failure | |
2767 | */ | |
dc3c8d3a | 2768 | int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, |
68078d5c DR |
2769 | enum flag_idn idn, bool *flag_res) |
2770 | { | |
d44a5f98 DR |
2771 | struct ufs_query_req *request = NULL; |
2772 | struct ufs_query_res *response = NULL; | |
2773 | int err, index = 0, selector = 0; | |
e5ad406c | 2774 | int timeout = QUERY_REQ_TIMEOUT; |
68078d5c DR |
2775 | |
2776 | BUG_ON(!hba); | |
2777 | ||
1ab27c9c | 2778 | ufshcd_hold(hba, false); |
68078d5c | 2779 | mutex_lock(&hba->dev_cmd.lock); |
d44a5f98 DR |
2780 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
2781 | selector); | |
68078d5c DR |
2782 | |
2783 | switch (opcode) { | |
2784 | case UPIU_QUERY_OPCODE_SET_FLAG: | |
2785 | case UPIU_QUERY_OPCODE_CLEAR_FLAG: | |
2786 | case UPIU_QUERY_OPCODE_TOGGLE_FLAG: | |
2787 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
2788 | break; | |
2789 | case UPIU_QUERY_OPCODE_READ_FLAG: | |
2790 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
2791 | if (!flag_res) { | |
2792 | /* No dummy reads */ | |
2793 | dev_err(hba->dev, "%s: Invalid argument for read request\n", | |
2794 | __func__); | |
2795 | err = -EINVAL; | |
2796 | goto out_unlock; | |
2797 | } | |
2798 | break; | |
2799 | default: | |
2800 | dev_err(hba->dev, | |
2801 | "%s: Expected query flag opcode but got = %d\n", | |
2802 | __func__, opcode); | |
2803 | err = -EINVAL; | |
2804 | goto out_unlock; | |
2805 | } | |
68078d5c | 2806 | |
e5ad406c | 2807 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); |
68078d5c DR |
2808 | |
2809 | if (err) { | |
2810 | dev_err(hba->dev, | |
2811 | "%s: Sending flag query for idn %d failed, err = %d\n", | |
2812 | __func__, idn, err); | |
2813 | goto out_unlock; | |
2814 | } | |
2815 | ||
2816 | if (flag_res) | |
e8c8e82a | 2817 | *flag_res = (be32_to_cpu(response->upiu_res.value) & |
68078d5c DR |
2818 | MASK_QUERY_UPIU_FLAG_LOC) & 0x1; |
2819 | ||
2820 | out_unlock: | |
2821 | mutex_unlock(&hba->dev_cmd.lock); | |
1ab27c9c | 2822 | ufshcd_release(hba); |
68078d5c DR |
2823 | return err; |
2824 | } | |
2825 | ||
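/*
 * Illustrative usage (mirrors the fDeviceInit handling elsewhere in
 * this driver): set a flag and then poll it back through the retry
 * wrapper above:
 *
 *	bool flag_res;
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *			QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	if (!err)
 *		err = ufshcd_query_flag_retry(hba,
 *				UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */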
66ec6d59 SRT |
2826 | /** |
2827 | * ufshcd_query_attr - API function for sending attribute requests | |
8aa29f19 BVA |
2828 | * @hba: per-adapter instance |
2829 | * @opcode: attribute opcode | |
2830 | * @idn: attribute idn to access | |
2831 | * @index: index field | |
2832 | * @selector: selector field | |
2833 | * @attr_val: the attribute value after the query request completes | |
66ec6d59 SRT |
2834 | * |
2835 | * Returns 0 for success, non-zero in case of failure | |
2836 | */ | |
ec92b59c SN |
2837 | int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, |
2838 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) | |
66ec6d59 | 2839 | { |
d44a5f98 DR |
2840 | struct ufs_query_req *request = NULL; |
2841 | struct ufs_query_res *response = NULL; | |
66ec6d59 SRT |
2842 | int err; |
2843 | ||
2844 | BUG_ON(!hba); | |
2845 | ||
1ab27c9c | 2846 | ufshcd_hold(hba, false); |
66ec6d59 SRT |
2847 | if (!attr_val) { |
2848 | dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", | |
2849 | __func__, opcode); | |
2850 | err = -EINVAL; | |
2851 | goto out; | |
2852 | } | |
2853 | ||
2854 | mutex_lock(&hba->dev_cmd.lock); | |
d44a5f98 DR |
2855 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
2856 | selector); | |
66ec6d59 SRT |
2857 | |
2858 | switch (opcode) { | |
2859 | case UPIU_QUERY_OPCODE_WRITE_ATTR: | |
2860 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
e8c8e82a | 2861 | request->upiu_req.value = cpu_to_be32(*attr_val); |
66ec6d59 SRT |
2862 | break; |
2863 | case UPIU_QUERY_OPCODE_READ_ATTR: | |
2864 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
2865 | break; | |
2866 | default: | |
2867 | dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", | |
2868 | __func__, opcode); | |
2869 | err = -EINVAL; | |
2870 | goto out_unlock; | |
2871 | } | |
2872 | ||
d44a5f98 | 2873 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
66ec6d59 SRT |
2874 | |
2875 | if (err) { | |
4b761b58 YG |
2876 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", |
2877 | __func__, opcode, idn, index, err); | |
66ec6d59 SRT |
2878 | goto out_unlock; |
2879 | } | |
2880 | ||
e8c8e82a | 2881 | *attr_val = be32_to_cpu(response->upiu_res.value); |
66ec6d59 SRT |
2882 | |
2883 | out_unlock: | |
2884 | mutex_unlock(&hba->dev_cmd.lock); | |
2885 | out: | |
1ab27c9c | 2886 | ufshcd_release(hba); |
66ec6d59 SRT |
2887 | return err; |
2888 | } | |
2889 | ||
5e86ae44 YG |
2890 | /** |
2891 | * ufshcd_query_attr_retry() - API function for sending query | |
2892 | * attribute with retries | |
2893 | * @hba: per-adapter instance | |
2894 | * @opcode: attribute opcode | |
2895 | * @idn: attribute idn to access | |
2896 | * @index: index field | |
2897 | * @selector: selector field | |
2898 | * @attr_val: the attribute value after the query request | |
2899 | * completes | |
2900 | * | |
2901 | * Returns 0 for success, non-zero in case of failure | |
2902 | */ | |
2903 | static int ufshcd_query_attr_retry(struct ufs_hba *hba, | |
2904 | enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, | |
2905 | u32 *attr_val) | |
2906 | { | |
2907 | int ret = 0; | |
2908 | u32 retries; | |
2909 | ||
2910 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | |
2911 | ret = ufshcd_query_attr(hba, opcode, idn, index, | |
2912 | selector, attr_val); | |
2913 | if (ret) | |
2914 | dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", | |
2915 | __func__, ret, retries); | |
2916 | else | |
2917 | break; | |
2918 | } | |
2919 | ||
2920 | if (ret) | |
2921 | dev_err(hba->dev, | |
2922 | "%s: query attribute, idn %d, failed with error %d after %d retires\n", | |
2923 | __func__, idn, ret, QUERY_REQ_RETRIES); | |
2924 | return ret; | |
2925 | } | |
2926 | ||
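/*
 * Illustrative sketch, not part of the driver: reading a single attribute
 * through the retry wrapper defined above. The attribute picked here
 * (bActiveICCLevel) and the helper name are assumptions for this example.
 */
static int __maybe_unused ufshcd_example_read_icc_level(struct ufs_hba *hba,
							u32 *icc_level)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				       icc_level);
}
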
a70e91b8 | 2927 | static int __ufshcd_query_descriptor(struct ufs_hba *hba, |
d44a5f98 DR |
2928 | enum query_opcode opcode, enum desc_idn idn, u8 index, |
2929 | u8 selector, u8 *desc_buf, int *buf_len) | |
2930 | { | |
2931 | struct ufs_query_req *request = NULL; | |
2932 | struct ufs_query_res *response = NULL; | |
2933 | int err; | |
2934 | ||
2935 | BUG_ON(!hba); | |
2936 | ||
1ab27c9c | 2937 | ufshcd_hold(hba, false); |
d44a5f98 DR |
2938 | if (!desc_buf) { |
2939 | dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", | |
2940 | __func__, opcode); | |
2941 | err = -EINVAL; | |
2942 | goto out; | |
2943 | } | |
2944 | ||
a4b0e8a4 | 2945 | if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { |
d44a5f98 DR |
2946 | dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", |
2947 | __func__, *buf_len); | |
2948 | err = -EINVAL; | |
2949 | goto out; | |
2950 | } | |
2951 | ||
2952 | mutex_lock(&hba->dev_cmd.lock); | |
2953 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, | |
2954 | selector); | |
2955 | hba->dev_cmd.query.descriptor = desc_buf; | |
ea2aab24 | 2956 | request->upiu_req.length = cpu_to_be16(*buf_len); |
d44a5f98 DR |
2957 | |
2958 | switch (opcode) { | |
2959 | case UPIU_QUERY_OPCODE_WRITE_DESC: | |
2960 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
2961 | break; | |
2962 | case UPIU_QUERY_OPCODE_READ_DESC: | |
2963 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
2964 | break; | |
2965 | default: | |
2966 | dev_err(hba->dev, | |
2967 | "%s: Expected query descriptor opcode but got = 0x%.2x\n", | |
2968 | __func__, opcode); | |
2969 | err = -EINVAL; | |
2970 | goto out_unlock; | |
2971 | } | |
2972 | ||
2973 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); | |
2974 | ||
2975 | if (err) { | |
4b761b58 YG |
2976 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", |
2977 | __func__, opcode, idn, index, err); | |
d44a5f98 DR |
2978 | goto out_unlock; |
2979 | } | |
2980 | ||
2981 | hba->dev_cmd.query.descriptor = NULL; | |
ea2aab24 | 2982 | *buf_len = be16_to_cpu(response->upiu_res.length); |
d44a5f98 DR |
2983 | |
2984 | out_unlock: | |
2985 | mutex_unlock(&hba->dev_cmd.lock); | |
2986 | out: | |
1ab27c9c | 2987 | ufshcd_release(hba); |
d44a5f98 DR |
2988 | return err; |
2989 | } | |
2990 | ||
a70e91b8 | 2991 | /** |
8aa29f19 BVA |
2992 | * ufshcd_query_descriptor_retry - API function for sending descriptor requests |
2993 | * @hba: per-adapter instance | |
2994 | * @opcode: attribute opcode | |
2995 | * @idn: attribute idn to access | |
2996 | * @index: index field | |
2997 | * @selector: selector field | |
2998 | * @desc_buf: the buffer that contains the descriptor | |
2999 | * @buf_len: length parameter passed to the device | |
a70e91b8 YG |
3000 | * |
3001 | * Returns 0 for success, non-zero in case of failure. | |
3002 | * The buf_len parameter will contain, on return, the length parameter | |
3003 | * received on the response. | |
3004 | */ | |
2238d31c SN |
3005 | int ufshcd_query_descriptor_retry(struct ufs_hba *hba, |
3006 | enum query_opcode opcode, | |
3007 | enum desc_idn idn, u8 index, | |
3008 | u8 selector, | |
3009 | u8 *desc_buf, int *buf_len) | |
a70e91b8 YG |
3010 | { |
3011 | int err; | |
3012 | int retries; | |
3013 | ||
3014 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | |
3015 | err = __ufshcd_query_descriptor(hba, opcode, idn, index, | |
3016 | selector, desc_buf, buf_len); | |
3017 | if (!err || err == -EINVAL) | |
3018 | break; | |
3019 | } | |
3020 | ||
3021 | return err; | |
3022 | } | |
a70e91b8 | 3023 | |
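/*
 * Illustrative sketch, not part of the driver: pulling the whole Device
 * descriptor with the retry wrapper above. The helper name is an assumption;
 * the expected length comes from the desc_size structure filled at probe.
 */
static int __maybe_unused ufshcd_example_read_device_desc(struct ufs_hba *hba)
{
	u8 desc_buf[QUERY_DESC_MAX_SIZE];
	int buf_len = hba->desc_size.dev_desc;
	int err;

	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    QUERY_DESC_IDN_DEVICE, 0, 0,
					    desc_buf, &buf_len);
	if (!err)
		dev_dbg(hba->dev, "device descriptor read, length %d\n",
			buf_len);
	return err;
}
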
a4b0e8a4 PM |
3024 | /** |
3025 | * ufshcd_read_desc_length - read the specified descriptor length from header | |
3026 | * @hba: Pointer to adapter instance | |
3027 | * @desc_id: descriptor idn value | |
3028 | * @desc_index: descriptor index | |
3029 | * @desc_length: pointer to variable to read the length of descriptor | |
3030 | * | |
3031 | * Return 0 in case of success, non-zero otherwise | |
3032 | */ | |
3033 | static int ufshcd_read_desc_length(struct ufs_hba *hba, | |
3034 | enum desc_idn desc_id, | |
3035 | int desc_index, | |
3036 | int *desc_length) | |
3037 | { | |
3038 | int ret; | |
3039 | u8 header[QUERY_DESC_HDR_SIZE]; | |
3040 | int header_len = QUERY_DESC_HDR_SIZE; | |
3041 | ||
3042 | if (desc_id >= QUERY_DESC_IDN_MAX) | |
3043 | return -EINVAL; | |
3044 | ||
3045 | ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, | |
3046 | desc_id, desc_index, 0, header, | |
3047 | &header_len); | |
3048 | ||
3049 | if (ret) { | |
3050 | dev_err(hba->dev, "%s: Failed to get descriptor header id %d", | |
3051 | __func__, desc_id); | |
3052 | return ret; | |
3053 | } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) { | |
3054 | dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch", | |
3055 | __func__, header[QUERY_DESC_DESC_TYPE_OFFSET], | |
3056 | desc_id); | |
3057 | ret = -EINVAL; | |
3058 | } | |
3059 | ||
3060 | *desc_length = header[QUERY_DESC_LENGTH_OFFSET]; | |
3061 | return ret; | |
3062 | ||
3063 | } | |
3064 | ||
3065 | /** | |
3066 | * ufshcd_map_desc_id_to_length - map descriptor IDN to its length | |
3067 | * @hba: Pointer to adapter instance | |
3068 | * @desc_id: descriptor idn value | |
3069 | * @desc_len: mapped desc length (out) | |
3070 | * | |
3071 | * Return 0 in case of success, non-zero otherwise | |
3072 | */ | |
3073 | int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, | |
3074 | enum desc_idn desc_id, int *desc_len) | |
3075 | { | |
3076 | switch (desc_id) { | |
3077 | case QUERY_DESC_IDN_DEVICE: | |
3078 | *desc_len = hba->desc_size.dev_desc; | |
3079 | break; | |
3080 | case QUERY_DESC_IDN_POWER: | |
3081 | *desc_len = hba->desc_size.pwr_desc; | |
3082 | break; | |
3083 | case QUERY_DESC_IDN_GEOMETRY: | |
3084 | *desc_len = hba->desc_size.geom_desc; | |
3085 | break; | |
3086 | case QUERY_DESC_IDN_CONFIGURATION: | |
3087 | *desc_len = hba->desc_size.conf_desc; | |
3088 | break; | |
3089 | case QUERY_DESC_IDN_UNIT: | |
3090 | *desc_len = hba->desc_size.unit_desc; | |
3091 | break; | |
3092 | case QUERY_DESC_IDN_INTERCONNECT: | |
3093 | *desc_len = hba->desc_size.interc_desc; | |
3094 | break; | |
3095 | case QUERY_DESC_IDN_STRING: | |
3096 | *desc_len = QUERY_DESC_MAX_SIZE; | |
3097 | break; | |
c648c2d2 SN |
3098 | case QUERY_DESC_IDN_HEALTH: |
3099 | *desc_len = hba->desc_size.hlth_desc; | |
3100 | break; | |
a4b0e8a4 PM |
3101 | case QUERY_DESC_IDN_RFU_0: |
3102 | case QUERY_DESC_IDN_RFU_1: | |
3103 | *desc_len = 0; | |
3104 | break; | |
3105 | default: | |
3106 | *desc_len = 0; | |
3107 | return -EINVAL; | |
3108 | } | |
3109 | return 0; | |
3110 | } | |
3111 | EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); | |
3112 | ||
da461cec SJ |
3113 | /** |
3114 | * ufshcd_read_desc_param - read the specified descriptor parameter | |
3115 | * @hba: Pointer to adapter instance | |
3116 | * @desc_id: descriptor idn value | |
3117 | * @desc_index: descriptor index | |
3118 | * @param_offset: offset of the parameter to read | |
3119 | * @param_read_buf: pointer to buffer where parameter would be read | |
3120 | * @param_size: sizeof(param_read_buf) | |
3121 | * | |
3122 | * Return 0 in case of success, non-zero otherwise | |
3123 | */ | |
45bced87 SN |
3124 | int ufshcd_read_desc_param(struct ufs_hba *hba, |
3125 | enum desc_idn desc_id, | |
3126 | int desc_index, | |
3127 | u8 param_offset, | |
3128 | u8 *param_read_buf, | |
3129 | u8 param_size) | |
da461cec SJ |
3130 | { |
3131 | int ret; | |
3132 | u8 *desc_buf; | |
a4b0e8a4 | 3133 | int buff_len; |
da461cec SJ |
3134 | bool is_kmalloc = true; |
3135 | ||
a4b0e8a4 PM |
3136 | /* Safety check */ |
3137 | if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) | |
da461cec SJ |
3138 | return -EINVAL; |
3139 | ||
a4b0e8a4 PM |
3140 | /* Get the max length of descriptor from structure filled up at probe |
3141 | * time. | |
3142 | */ | |
3143 | ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); | |
da461cec | 3144 | |
a4b0e8a4 PM |
3145 | /* Sanity checks */ |
3146 | if (ret || !buff_len) { | |
3147 | dev_err(hba->dev, "%s: Failed to get full descriptor length", | |
3148 | __func__); | |
3149 | return ret; | |
3150 | } | |
3151 | ||
3152 | /* Check whether we need temp memory */ | |
3153 | if (param_offset != 0 || param_size < buff_len) { | |
da461cec SJ |
3154 | desc_buf = kmalloc(buff_len, GFP_KERNEL); |
3155 | if (!desc_buf) | |
3156 | return -ENOMEM; | |
a4b0e8a4 PM |
3157 | } else { |
3158 | desc_buf = param_read_buf; | |
3159 | is_kmalloc = false; | |
da461cec SJ |
3160 | } |
3161 | ||
a4b0e8a4 | 3162 | /* Request for full descriptor */ |
a70e91b8 | 3163 | ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, |
a4b0e8a4 PM |
3164 | desc_id, desc_index, 0, |
3165 | desc_buf, &buff_len); | |
da461cec | 3166 | |
bde44bb6 SJ |
3167 | if (ret) { |
3168 | dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d", | |
3169 | __func__, desc_id, desc_index, param_offset, ret); | |
da461cec SJ |
3170 | goto out; |
3171 | } | |
3172 | ||
bde44bb6 SJ |
3173 | /* Sanity check */ |
3174 | if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { | |
3175 | dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header", | |
3176 | __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); | |
3177 | ret = -EINVAL; | |
3178 | goto out; | |
3179 | } | |
3180 | ||
a4b0e8a4 PM |
3181 | /* Make sure we do not copy more data than is actually available */ | |
3182 | if (is_kmalloc && param_size > buff_len) | |
3183 | param_size = buff_len; | |
bde44bb6 | 3184 | |
da461cec SJ |
3185 | if (is_kmalloc) |
3186 | memcpy(param_read_buf, &desc_buf[param_offset], param_size); | |
3187 | out: | |
3188 | if (is_kmalloc) | |
3189 | kfree(desc_buf); | |
3190 | return ret; | |
3191 | } | |
3192 | ||
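/*
 * Illustrative sketch, not part of the driver: fetching one parameter
 * (wManufacturerID, two big-endian bytes) from the Device descriptor via
 * ufshcd_read_desc_param(). The helper name is an assumption made for the
 * example.
 */
static int __maybe_unused ufshcd_example_read_manf_id(struct ufs_hba *hba,
						      u16 *manf_id)
{
	u8 raw[2];
	int err;

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				     DEVICE_DESC_PARAM_MANF_ID, raw,
				     sizeof(raw));
	if (!err)
		*manf_id = raw[0] << 8 | raw[1];
	return err;
}
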
3193 | static inline int ufshcd_read_desc(struct ufs_hba *hba, | |
3194 | enum desc_idn desc_id, | |
3195 | int desc_index, | |
3196 | u8 *buf, | |
3197 | u32 size) | |
3198 | { | |
3199 | return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); | |
3200 | } | |
3201 | ||
3202 | static inline int ufshcd_read_power_desc(struct ufs_hba *hba, | |
3203 | u8 *buf, | |
3204 | u32 size) | |
3205 | { | |
dbd34a61 | 3206 | return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); |
da461cec SJ |
3207 | } |
3208 | ||
8209b6d5 | 3209 | static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) |
b573d484 YG |
3210 | { |
3211 | return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); | |
3212 | } | |
b573d484 YG |
3213 | |
3214 | /** | |
3215 | * ufshcd_read_string_desc - read string descriptor | |
3216 | * @hba: pointer to adapter instance | |
3217 | * @desc_index: descriptor index | |
3218 | * @buf: pointer to buffer where descriptor would be read | |
3219 | * @size: size of buf | |
3220 | * @ascii: if true convert from unicode to ascii characters | |
3221 | * | |
3222 | * Return 0 in case of success, non-zero otherwise | |
3223 | */ | |
2238d31c SN |
3224 | int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, |
3225 | u8 *buf, u32 size, bool ascii) | |
b573d484 YG |
3226 | { |
3227 | int err = 0; | |
3228 | ||
3229 | err = ufshcd_read_desc(hba, | |
3230 | QUERY_DESC_IDN_STRING, desc_index, buf, size); | |
3231 | ||
3232 | if (err) { | |
3233 | dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", | |
3234 | __func__, QUERY_REQ_RETRIES, err); | |
3235 | goto out; | |
3236 | } | |
3237 | ||
3238 | if (ascii) { | |
3239 | int desc_len; | |
3240 | int ascii_len; | |
3241 | int i; | |
3242 | char *buff_ascii; | |
3243 | ||
3244 | desc_len = buf[0]; | |
3245 | /* remove header and divide by 2 to move from UTF16 to UTF8 */ | |
3246 | ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; | |
3247 | if (size < ascii_len + QUERY_DESC_HDR_SIZE) { | |
3248 | dev_err(hba->dev, "%s: buffer allocated size is too small\n", | |
3249 | __func__); | |
3250 | err = -ENOMEM; | |
3251 | goto out; | |
3252 | } | |
3253 | ||
3254 | buff_ascii = kmalloc(ascii_len, GFP_KERNEL); | |
3255 | if (!buff_ascii) { | |
3256 | err = -ENOMEM; | |
fcbefc3b | 3257 | goto out; |
b573d484 YG |
3258 | } |
3259 | ||
3260 | /* | |
3261 | * the descriptor contains string in UTF16 format | |
3262 | * we need to convert to utf-8 so it can be displayed | |
3263 | */ | |
3264 | utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], | |
3265 | desc_len - QUERY_DESC_HDR_SIZE, | |
3266 | UTF16_BIG_ENDIAN, buff_ascii, ascii_len); | |
3267 | ||
3268 | /* replace non-printable or non-ASCII characters with spaces */ | |
3269 | for (i = 0; i < ascii_len; i++) | |
3270 | ufshcd_remove_non_printable(&buff_ascii[i]); | |
3271 | ||
3272 | memset(buf + QUERY_DESC_HDR_SIZE, 0, | |
3273 | size - QUERY_DESC_HDR_SIZE); | |
3274 | memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); | |
3275 | buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; | |
b573d484 YG |
3276 | kfree(buff_ascii); |
3277 | } | |
3278 | out: | |
3279 | return err; | |
3280 | } | |
b573d484 | 3281 | |
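/*
 * Illustrative sketch, not part of the driver: reading a string descriptor
 * (for example the product name, whose index is taken from the Device
 * descriptor) and converting it to ASCII in place. The helper name is an
 * assumption made for the example.
 */
static int __maybe_unused ufshcd_example_read_string(struct ufs_hba *hba,
						     int desc_index)
{
	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
	int err;

	err = ufshcd_read_string_desc(hba, desc_index, str_desc_buf,
				      QUERY_DESC_MAX_SIZE, true);
	if (!err)
		dev_dbg(hba->dev, "string descriptor: %s\n",
			str_desc_buf + QUERY_DESC_HDR_SIZE);
	return err;
}
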
da461cec SJ |
3282 | /** |
3283 | * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter | |
3284 | * @hba: Pointer to adapter instance | |
3285 | * @lun: lun id | |
3286 | * @param_offset: offset of the parameter to read | |
3287 | * @param_read_buf: pointer to buffer where parameter would be read | |
3288 | * @param_size: sizeof(param_read_buf) | |
3289 | * | |
3290 | * Return 0 in case of success, non-zero otherwise | |
3291 | */ | |
3292 | static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, | |
3293 | int lun, | |
3294 | enum unit_desc_param param_offset, | |
3295 | u8 *param_read_buf, | |
3296 | u32 param_size) | |
3297 | { | |
3298 | /* | |
3299 | * Unit descriptors are only available for general purpose LUs (LUN id | |
3300 | * from 0 to 7) and RPMB Well known LU. | |
3301 | */ | |
d829fc8a | 3302 | if (!ufs_is_valid_unit_desc_lun(lun)) |
da461cec SJ |
3303 | return -EOPNOTSUPP; |
3304 | ||
3305 | return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, | |
3306 | param_offset, param_read_buf, param_size); | |
3307 | } | |
3308 | ||
7a3e97b0 SY |
3309 | /** |
3310 | * ufshcd_memory_alloc - allocate memory for host memory space data structures | |
3311 | * @hba: per adapter instance | |
3312 | * | |
3313 | * 1. Allocate DMA memory for Command Descriptor array | |
3314 | * Each command descriptor consist of Command UPIU, Response UPIU and PRDT | |
3315 | * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). | |
3316 | * 3. Allocate DMA memory for UTP Task Management Request Descriptor List | |
3317 | * (UTMRDL) | |
3318 | * 4. Allocate memory for local reference block(lrb). | |
3319 | * | |
3320 | * Returns 0 for success, non-zero in case of failure | |
3321 | */ | |
3322 | static int ufshcd_memory_alloc(struct ufs_hba *hba) | |
3323 | { | |
3324 | size_t utmrdl_size, utrdl_size, ucdl_size; | |
3325 | ||
3326 | /* Allocate memory for UTP command descriptors */ | |
3327 | ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | |
2953f850 SJ |
3328 | hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, |
3329 | ucdl_size, | |
3330 | &hba->ucdl_dma_addr, | |
3331 | GFP_KERNEL); | |
7a3e97b0 SY |
3332 | |
3333 | /* | |
3334 | * UFSHCI requires UTP command descriptor to be 128 byte aligned. | |
3335 | * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE | |
3336 | * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will | |
3337 | * be aligned to 128 bytes as well | |
3338 | */ | |
3339 | if (!hba->ucdl_base_addr || | |
3340 | WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 3341 | dev_err(hba->dev, |
7a3e97b0 SY |
3342 | "Command Descriptor Memory allocation failed\n"); |
3343 | goto out; | |
3344 | } | |
3345 | ||
3346 | /* | |
3347 | * Allocate memory for UTP Transfer descriptors | |
3348 | * UFSHCI requires 1024 byte alignment of UTRD | |
3349 | */ | |
3350 | utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | |
2953f850 SJ |
3351 | hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, |
3352 | utrdl_size, | |
3353 | &hba->utrdl_dma_addr, | |
3354 | GFP_KERNEL); | |
7a3e97b0 SY |
3355 | if (!hba->utrdl_base_addr || |
3356 | WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 3357 | dev_err(hba->dev, |
7a3e97b0 SY |
3358 | "Transfer Descriptor Memory allocation failed\n"); |
3359 | goto out; | |
3360 | } | |
3361 | ||
3362 | /* | |
3363 | * Allocate memory for UTP Task Management descriptors | |
3364 | * UFSHCI requires 1024 byte alignment of UTMRD | |
3365 | */ | |
3366 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | |
2953f850 SJ |
3367 | hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, |
3368 | utmrdl_size, | |
3369 | &hba->utmrdl_dma_addr, | |
3370 | GFP_KERNEL); | |
7a3e97b0 SY |
3371 | if (!hba->utmrdl_base_addr || |
3372 | WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 3373 | dev_err(hba->dev, |
7a3e97b0 SY |
3374 | "Task Management Descriptor Memory allocation failed\n"); |
3375 | goto out; | |
3376 | } | |
3377 | ||
3378 | /* Allocate memory for local reference block */ | |
a86854d0 KC |
3379 | hba->lrb = devm_kcalloc(hba->dev, |
3380 | hba->nutrs, sizeof(struct ufshcd_lrb), | |
2953f850 | 3381 | GFP_KERNEL); |
7a3e97b0 | 3382 | if (!hba->lrb) { |
3b1d0580 | 3383 | dev_err(hba->dev, "LRB Memory allocation failed\n"); |
7a3e97b0 SY |
3384 | goto out; |
3385 | } | |
3386 | return 0; | |
3387 | out: | |
7a3e97b0 SY |
3388 | return -ENOMEM; |
3389 | } | |
3390 | ||
3391 | /** | |
3392 | * ufshcd_host_memory_configure - configure local reference block with | |
3393 | * memory offsets | |
3394 | * @hba: per adapter instance | |
3395 | * | |
3396 | * Configure Host memory space | |
3397 | * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA | |
3398 | * address. | |
3399 | * 2. Update each UTRD with Response UPIU offset, Response UPIU length | |
3400 | * and PRDT offset. | |
3401 | * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT | |
3402 | * into local reference block. | |
3403 | */ | |
3404 | static void ufshcd_host_memory_configure(struct ufs_hba *hba) | |
3405 | { | |
3406 | struct utp_transfer_cmd_desc *cmd_descp; | |
3407 | struct utp_transfer_req_desc *utrdlp; | |
3408 | dma_addr_t cmd_desc_dma_addr; | |
3409 | dma_addr_t cmd_desc_element_addr; | |
3410 | u16 response_offset; | |
3411 | u16 prdt_offset; | |
3412 | int cmd_desc_size; | |
3413 | int i; | |
3414 | ||
3415 | utrdlp = hba->utrdl_base_addr; | |
3416 | cmd_descp = hba->ucdl_base_addr; | |
3417 | ||
3418 | response_offset = | |
3419 | offsetof(struct utp_transfer_cmd_desc, response_upiu); | |
3420 | prdt_offset = | |
3421 | offsetof(struct utp_transfer_cmd_desc, prd_table); | |
3422 | ||
3423 | cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); | |
3424 | cmd_desc_dma_addr = hba->ucdl_dma_addr; | |
3425 | ||
3426 | for (i = 0; i < hba->nutrs; i++) { | |
3427 | /* Configure UTRD with command descriptor base address */ | |
3428 | cmd_desc_element_addr = | |
3429 | (cmd_desc_dma_addr + (cmd_desc_size * i)); | |
3430 | utrdlp[i].command_desc_base_addr_lo = | |
3431 | cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); | |
3432 | utrdlp[i].command_desc_base_addr_hi = | |
3433 | cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); | |
3434 | ||
3435 | /* Response upiu and prdt offset should be in double words */ | |
75b1cc4a KK |
3436 | if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { |
3437 | utrdlp[i].response_upiu_offset = | |
3438 | cpu_to_le16(response_offset); | |
3439 | utrdlp[i].prd_table_offset = | |
3440 | cpu_to_le16(prdt_offset); | |
3441 | utrdlp[i].response_upiu_length = | |
3442 | cpu_to_le16(ALIGNED_UPIU_SIZE); | |
3443 | } else { | |
3444 | utrdlp[i].response_upiu_offset = | |
7a3e97b0 | 3445 | cpu_to_le16((response_offset >> 2)); |
75b1cc4a | 3446 | utrdlp[i].prd_table_offset = |
7a3e97b0 | 3447 | cpu_to_le16((prdt_offset >> 2)); |
75b1cc4a | 3448 | utrdlp[i].response_upiu_length = |
3ca316c5 | 3449 | cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); |
75b1cc4a | 3450 | } |
7a3e97b0 SY |
3451 | |
3452 | hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); | |
ff8e20c6 DR |
3453 | hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr + |
3454 | (i * sizeof(struct utp_transfer_req_desc)); | |
5a0b0cb9 SRT |
3455 | hba->lrb[i].ucd_req_ptr = |
3456 | (struct utp_upiu_req *)(cmd_descp + i); | |
ff8e20c6 | 3457 | hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr; |
7a3e97b0 SY |
3458 | hba->lrb[i].ucd_rsp_ptr = |
3459 | (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; | |
ff8e20c6 DR |
3460 | hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr + |
3461 | response_offset; | |
7a3e97b0 SY |
3462 | hba->lrb[i].ucd_prdt_ptr = |
3463 | (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; | |
ff8e20c6 DR |
3464 | hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr + |
3465 | prdt_offset; | |
7a3e97b0 SY |
3466 | } |
3467 | } | |
3468 | ||
3469 | /** | |
3470 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup | |
3471 | * @hba: per adapter instance | |
3472 | * | |
3473 | * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, | |
3474 | * in order to initialize the Unipro link startup procedure. | |
3475 | * Once the Unipro links are up, the device connected to the controller | |
3476 | * is detected. | |
3477 | * | |
3478 | * Returns 0 on success, non-zero value on failure | |
3479 | */ | |
3480 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) | |
3481 | { | |
6ccf44fe SJ |
3482 | struct uic_command uic_cmd = {0}; |
3483 | int ret; | |
7a3e97b0 | 3484 | |
6ccf44fe | 3485 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; |
7a3e97b0 | 3486 | |
6ccf44fe SJ |
3487 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
3488 | if (ret) | |
ff8e20c6 | 3489 | dev_dbg(hba->dev, |
6ccf44fe SJ |
3490 | "dme-link-startup: error code %d\n", ret); |
3491 | return ret; | |
7a3e97b0 | 3492 | } |
4404c5de AA |
3493 | /** |
3494 | * ufshcd_dme_reset - UIC command for DME_RESET | |
3495 | * @hba: per adapter instance | |
3496 | * | |
3497 | * DME_RESET command is issued in order to reset UniPro stack. | |
3498 | * This function now deals with cold reset. | |
3499 | * | |
3500 | * Returns 0 on success, non-zero value on failure | |
3501 | */ | |
3502 | static int ufshcd_dme_reset(struct ufs_hba *hba) | |
3503 | { | |
3504 | struct uic_command uic_cmd = {0}; | |
3505 | int ret; | |
3506 | ||
3507 | uic_cmd.command = UIC_CMD_DME_RESET; | |
3508 | ||
3509 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
3510 | if (ret) | |
3511 | dev_err(hba->dev, | |
3512 | "dme-reset: error code %d\n", ret); | |
3513 | ||
3514 | return ret; | |
3515 | } | |
3516 | ||
3517 | /** | |
3518 | * ufshcd_dme_enable - UIC command for DME_ENABLE | |
3519 | * @hba: per adapter instance | |
3520 | * | |
3521 | * DME_ENABLE command is issued in order to enable UniPro stack. | |
3522 | * | |
3523 | * Returns 0 on success, non-zero value on failure | |
3524 | */ | |
3525 | static int ufshcd_dme_enable(struct ufs_hba *hba) | |
3526 | { | |
3527 | struct uic_command uic_cmd = {0}; | |
3528 | int ret; | |
3529 | ||
3530 | uic_cmd.command = UIC_CMD_DME_ENABLE; | |
3531 | ||
3532 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
3533 | if (ret) | |
3534 | dev_err(hba->dev, | |
3535 | "dme-reset: error code %d\n", ret); | |
3536 | ||
3537 | return ret; | |
3538 | } | |
7a3e97b0 | 3539 | |
cad2e03d YG |
3540 | static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) |
3541 | { | |
3542 | #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 | |
3543 | unsigned long min_sleep_time_us; | |
3544 | ||
3545 | if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) | |
3546 | return; | |
3547 | ||
3548 | /* | |
3549 | * last_dme_cmd_tstamp will be 0 only for 1st call to | |
3550 | * this function | |
3551 | */ | |
3552 | if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { | |
3553 | min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; | |
3554 | } else { | |
3555 | unsigned long delta = | |
3556 | (unsigned long) ktime_to_us( | |
3557 | ktime_sub(ktime_get(), | |
3558 | hba->last_dme_cmd_tstamp)); | |
3559 | ||
3560 | if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) | |
3561 | min_sleep_time_us = | |
3562 | MIN_DELAY_BEFORE_DME_CMDS_US - delta; | |
3563 | else | |
3564 | return; /* no more delay required */ | |
3565 | } | |
3566 | ||
3567 | /* allow sleep for extra 50us if needed */ | |
3568 | usleep_range(min_sleep_time_us, min_sleep_time_us + 50); | |
3569 | } | |
3570 | ||
12b4fdb4 SJ |
3571 | /** |
3572 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET | |
3573 | * @hba: per adapter instance | |
3574 | * @attr_sel: uic command argument1 | |
3575 | * @attr_set: attribute set type as uic command argument2 | |
3576 | * @mib_val: setting value as uic command argument3 | |
3577 | * @peer: indicate whether peer or local | |
3578 | * | |
3579 | * Returns 0 on success, non-zero value on failure | |
3580 | */ | |
3581 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | |
3582 | u8 attr_set, u32 mib_val, u8 peer) | |
3583 | { | |
3584 | struct uic_command uic_cmd = {0}; | |
3585 | static const char *const action[] = { | |
3586 | "dme-set", | |
3587 | "dme-peer-set" | |
3588 | }; | |
3589 | const char *set = action[!!peer]; | |
3590 | int ret; | |
64238fbd | 3591 | int retries = UFS_UIC_COMMAND_RETRIES; |
12b4fdb4 SJ |
3592 | |
3593 | uic_cmd.command = peer ? | |
3594 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; | |
3595 | uic_cmd.argument1 = attr_sel; | |
3596 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); | |
3597 | uic_cmd.argument3 = mib_val; | |
3598 | ||
64238fbd YG |
3599 | do { |
3600 | /* for peer attributes we retry upon failure */ | |
3601 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
3602 | if (ret) | |
3603 | dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", | |
3604 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); | |
3605 | } while (ret && peer && --retries); | |
3606 | ||
f37e9f8c | 3607 | if (ret) |
64238fbd | 3608 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", |
f37e9f8c YG |
3609 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, |
3610 | UFS_UIC_COMMAND_RETRIES - retries); | |
12b4fdb4 SJ |
3611 | |
3612 | return ret; | |
3613 | } | |
3614 | EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); | |
3615 | ||
3616 | /** | |
3617 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET | |
3618 | * @hba: per adapter instance | |
3619 | * @attr_sel: uic command argument1 | |
3620 | * @mib_val: the value of the attribute as returned by the UIC command | |
3621 | * @peer: indicate whether peer or local | |
3622 | * | |
3623 | * Returns 0 on success, non-zero value on failure | |
3624 | */ | |
3625 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | |
3626 | u32 *mib_val, u8 peer) | |
3627 | { | |
3628 | struct uic_command uic_cmd = {0}; | |
3629 | static const char *const action[] = { | |
3630 | "dme-get", | |
3631 | "dme-peer-get" | |
3632 | }; | |
3633 | const char *get = action[!!peer]; | |
3634 | int ret; | |
64238fbd | 3635 | int retries = UFS_UIC_COMMAND_RETRIES; |
874237f7 YG |
3636 | struct ufs_pa_layer_attr orig_pwr_info; |
3637 | struct ufs_pa_layer_attr temp_pwr_info; | |
3638 | bool pwr_mode_change = false; | |
3639 | ||
3640 | if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { | |
3641 | orig_pwr_info = hba->pwr_info; | |
3642 | temp_pwr_info = orig_pwr_info; | |
3643 | ||
3644 | if (orig_pwr_info.pwr_tx == FAST_MODE || | |
3645 | orig_pwr_info.pwr_rx == FAST_MODE) { | |
3646 | temp_pwr_info.pwr_tx = FASTAUTO_MODE; | |
3647 | temp_pwr_info.pwr_rx = FASTAUTO_MODE; | |
3648 | pwr_mode_change = true; | |
3649 | } else if (orig_pwr_info.pwr_tx == SLOW_MODE || | |
3650 | orig_pwr_info.pwr_rx == SLOW_MODE) { | |
3651 | temp_pwr_info.pwr_tx = SLOWAUTO_MODE; | |
3652 | temp_pwr_info.pwr_rx = SLOWAUTO_MODE; | |
3653 | pwr_mode_change = true; | |
3654 | } | |
3655 | if (pwr_mode_change) { | |
3656 | ret = ufshcd_change_power_mode(hba, &temp_pwr_info); | |
3657 | if (ret) | |
3658 | goto out; | |
3659 | } | |
3660 | } | |
12b4fdb4 SJ |
3661 | |
3662 | uic_cmd.command = peer ? | |
3663 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; | |
3664 | uic_cmd.argument1 = attr_sel; | |
3665 | ||
64238fbd YG |
3666 | do { |
3667 | /* for peer attributes we retry upon failure */ | |
3668 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
3669 | if (ret) | |
3670 | dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", | |
3671 | get, UIC_GET_ATTR_ID(attr_sel), ret); | |
3672 | } while (ret && peer && --retries); | |
3673 | ||
f37e9f8c | 3674 | if (ret) |
64238fbd | 3675 | dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", |
f37e9f8c YG |
3676 | get, UIC_GET_ATTR_ID(attr_sel), |
3677 | UFS_UIC_COMMAND_RETRIES - retries); | |
12b4fdb4 | 3678 | |
64238fbd | 3679 | if (mib_val && !ret) |
12b4fdb4 | 3680 | *mib_val = uic_cmd.argument3; |
874237f7 YG |
3681 | |
3682 | if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) | |
3683 | && pwr_mode_change) | |
3684 | ufshcd_change_power_mode(hba, &orig_pwr_info); | |
12b4fdb4 SJ |
3685 | out: |
3686 | return ret; | |
3687 | } | |
3688 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | |
3689 | ||
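/*
 * Illustrative sketch, not part of the driver: the ufshcd_dme_get() and
 * ufshcd_dme_peer_get() wrappers from ufshcd.h build on the attribute
 * helpers above; here they read the maximum HS gear on both ends of the
 * link. The helper name is an assumption made for the example.
 */
static int __maybe_unused ufshcd_example_read_max_hs_gear(struct ufs_hba *hba)
{
	u32 local_gear = 0, peer_gear = 0;
	int err;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &local_gear);
	if (!err)
		err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
					  &peer_gear);
	if (!err)
		dev_dbg(hba->dev, "max HS gear: host %u, device %u\n",
			local_gear, peer_gear);
	return err;
}
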
53b3d9c3 | 3690 | /** |
57d104c1 SJ |
3691 | * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power | |
3692 | * state) and waits for them to take effect. | |
3693 | * | |
53b3d9c3 | 3694 | * @hba: per adapter instance |
57d104c1 SJ |
3695 | * @cmd: UIC command to execute |
3696 | * | |
3697 | * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & | |
3698 | * DME_HIBERNATE_EXIT commands take some time to take effect on both the host | |
3699 | * and device UniPro links, and hence their final completion is indicated by | |
3700 | * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in | |
3701 | * addition to normal UIC command completion Status (UCCS). This function only | |
3702 | * returns after the relevant status bits indicate the completion. | |
53b3d9c3 SJ |
3703 | * |
3704 | * Returns 0 on success, non-zero value on failure | |
3705 | */ | |
57d104c1 | 3706 | static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) |
53b3d9c3 | 3707 | { |
57d104c1 | 3708 | struct completion uic_async_done; |
53b3d9c3 SJ |
3709 | unsigned long flags; |
3710 | u8 status; | |
3711 | int ret; | |
d75f7fe4 | 3712 | bool reenable_intr = false; |
53b3d9c3 | 3713 | |
53b3d9c3 | 3714 | mutex_lock(&hba->uic_cmd_mutex); |
57d104c1 | 3715 | init_completion(&uic_async_done); |
cad2e03d | 3716 | ufshcd_add_delay_before_dme_cmd(hba); |
53b3d9c3 SJ |
3717 | |
3718 | spin_lock_irqsave(hba->host->host_lock, flags); | |
57d104c1 | 3719 | hba->uic_async_done = &uic_async_done; |
d75f7fe4 YG |
3720 | if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { |
3721 | ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); | |
3722 | /* | |
3723 | * Make sure UIC command completion interrupt is disabled before | |
3724 | * issuing UIC command. | |
3725 | */ | |
3726 | wmb(); | |
3727 | reenable_intr = true; | |
57d104c1 | 3728 | } |
d75f7fe4 YG |
3729 | ret = __ufshcd_send_uic_cmd(hba, cmd, false); |
3730 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
57d104c1 SJ |
3731 | if (ret) { |
3732 | dev_err(hba->dev, | |
3733 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", | |
3734 | cmd->command, cmd->argument3, ret); | |
53b3d9c3 SJ |
3735 | goto out; |
3736 | } | |
3737 | ||
57d104c1 | 3738 | if (!wait_for_completion_timeout(hba->uic_async_done, |
53b3d9c3 SJ |
3739 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) { |
3740 | dev_err(hba->dev, | |
57d104c1 SJ |
3741 | "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", |
3742 | cmd->command, cmd->argument3); | |
53b3d9c3 SJ |
3743 | ret = -ETIMEDOUT; |
3744 | goto out; | |
3745 | } | |
3746 | ||
3747 | status = ufshcd_get_upmcrs(hba); | |
3748 | if (status != PWR_LOCAL) { | |
3749 | dev_err(hba->dev, | |
479da360 | 3750 | "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", |
57d104c1 | 3751 | cmd->command, status); |
53b3d9c3 SJ |
3752 | ret = (status != PWR_OK) ? status : -1; |
3753 | } | |
3754 | out: | |
7942f7b5 VG |
3755 | if (ret) { |
3756 | ufshcd_print_host_state(hba); | |
3757 | ufshcd_print_pwr_info(hba); | |
3758 | ufshcd_print_host_regs(hba); | |
3759 | } | |
3760 | ||
53b3d9c3 | 3761 | spin_lock_irqsave(hba->host->host_lock, flags); |
d75f7fe4 | 3762 | hba->active_uic_cmd = NULL; |
57d104c1 | 3763 | hba->uic_async_done = NULL; |
d75f7fe4 YG |
3764 | if (reenable_intr) |
3765 | ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); | |
53b3d9c3 SJ |
3766 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
3767 | mutex_unlock(&hba->uic_cmd_mutex); | |
1ab27c9c | 3768 | |
53b3d9c3 SJ |
3769 | return ret; |
3770 | } | |
3771 | ||
57d104c1 SJ |
3772 | /** |
3773 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change | |
3774 | * using DME_SET primitives. | |
3775 | * @hba: per adapter instance | |
3776 | * @mode: power mode value | |
3777 | * | |
3778 | * Returns 0 on success, non-zero value on failure | |
3779 | */ | |
3780 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | |
3781 | { | |
3782 | struct uic_command uic_cmd = {0}; | |
1ab27c9c | 3783 | int ret; |
57d104c1 | 3784 | |
c3a2f9ee YG |
3785 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { |
3786 | ret = ufshcd_dme_set(hba, | |
3787 | UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1); | |
3788 | if (ret) { | |
3789 | dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", | |
3790 | __func__, ret); | |
3791 | goto out; | |
3792 | } | |
3793 | } | |
3794 | ||
57d104c1 SJ |
3795 | uic_cmd.command = UIC_CMD_DME_SET; |
3796 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | |
3797 | uic_cmd.argument3 = mode; | |
1ab27c9c ST |
3798 | ufshcd_hold(hba, false); |
3799 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
3800 | ufshcd_release(hba); | |
57d104c1 | 3801 | |
c3a2f9ee | 3802 | out: |
1ab27c9c | 3803 | return ret; |
57d104c1 SJ |
3804 | } |
3805 | ||
53c12d0e YG |
3806 | static int ufshcd_link_recovery(struct ufs_hba *hba) |
3807 | { | |
3808 | int ret; | |
3809 | unsigned long flags; | |
3810 | ||
3811 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3812 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
3813 | ufshcd_set_eh_in_progress(hba); | |
3814 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3815 | ||
3816 | ret = ufshcd_host_reset_and_restore(hba); | |
3817 | ||
3818 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3819 | if (ret) | |
3820 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
3821 | ufshcd_clear_eh_in_progress(hba); | |
3822 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3823 | ||
3824 | if (ret) | |
3825 | dev_err(hba->dev, "%s: link recovery failed, err %d", | |
3826 | __func__, ret); | |
3827 | ||
3828 | return ret; | |
3829 | } | |
3830 | ||
87d0b4a6 | 3831 | static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) |
57d104c1 | 3832 | { |
87d0b4a6 | 3833 | int ret; |
57d104c1 | 3834 | struct uic_command uic_cmd = {0}; |
911a0771 | 3835 | ktime_t start = ktime_get(); |
57d104c1 | 3836 | |
ee32c909 KK |
3837 | ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); |
3838 | ||
57d104c1 | 3839 | uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; |
87d0b4a6 | 3840 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); |
911a0771 SJ |
3841 | trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", |
3842 | ktime_to_us(ktime_sub(ktime_get(), start)), ret); | |
87d0b4a6 | 3843 | |
53c12d0e | 3844 | if (ret) { |
87d0b4a6 YG |
3845 | dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", |
3846 | __func__, ret); | |
3847 | ||
53c12d0e YG |
3848 | /* |
3849 | * If link recovery fails then return error so that the caller | |
3850 | * doesn't retry the hibern8 enter again. | |
3851 | */ | |
3852 | if (ufshcd_link_recovery(hba)) | |
3853 | ret = -ENOLINK; | |
ee32c909 KK |
3854 | } else |
3855 | ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, | |
3856 | POST_CHANGE); | |
53c12d0e | 3857 | |
87d0b4a6 YG |
3858 | return ret; |
3859 | } | |
3860 | ||
3861 | static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) | |
3862 | { | |
3863 | int ret = 0, retries; | |
57d104c1 | 3864 | |
87d0b4a6 YG |
3865 | for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { |
3866 | ret = __ufshcd_uic_hibern8_enter(hba); | |
3867 | if (!ret || ret == -ENOLINK) | |
3868 | goto out; | |
3869 | } | |
3870 | out: | |
3871 | return ret; | |
57d104c1 SJ |
3872 | } |
3873 | ||
3874 | static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) | |
3875 | { | |
3876 | struct uic_command uic_cmd = {0}; | |
3877 | int ret; | |
911a0771 | 3878 | ktime_t start = ktime_get(); |
57d104c1 | 3879 | |
ee32c909 KK |
3880 | ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); |
3881 | ||
57d104c1 SJ |
3882 | uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; |
3883 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
911a0771 SJ |
3884 | trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", |
3885 | ktime_to_us(ktime_sub(ktime_get(), start)), ret); | |
3886 | ||
57d104c1 | 3887 | if (ret) { |
53c12d0e YG |
3888 | dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", |
3889 | __func__, ret); | |
3890 | ret = ufshcd_link_recovery(hba); | |
ff8e20c6 | 3891 | } else { |
ee32c909 KK |
3892 | ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, |
3893 | POST_CHANGE); | |
ff8e20c6 DR |
3894 | hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); |
3895 | hba->ufs_stats.hibern8_exit_cnt++; | |
3896 | } | |
57d104c1 SJ |
3897 | |
3898 | return ret; | |
3899 | } | |
3900 | ||
ad448378 AH |
3901 | static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) |
3902 | { | |
3903 | unsigned long flags; | |
3904 | ||
3905 | if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit) | |
3906 | return; | |
3907 | ||
3908 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3909 | ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); | |
3910 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3911 | } | |
3912 | ||
5064636c YG |
3913 | /** |
3914 | * ufshcd_init_pwr_info - setting the POR (power on reset) | |
3915 | * values in hba power info | |
3916 | * @hba: per-adapter instance | |
3917 | */ | |
3918 | static void ufshcd_init_pwr_info(struct ufs_hba *hba) | |
3919 | { | |
3920 | hba->pwr_info.gear_rx = UFS_PWM_G1; | |
3921 | hba->pwr_info.gear_tx = UFS_PWM_G1; | |
3922 | hba->pwr_info.lane_rx = 1; | |
3923 | hba->pwr_info.lane_tx = 1; | |
3924 | hba->pwr_info.pwr_rx = SLOWAUTO_MODE; | |
3925 | hba->pwr_info.pwr_tx = SLOWAUTO_MODE; | |
3926 | hba->pwr_info.hs_rate = 0; | |
3927 | } | |
3928 | ||
d3e89bac | 3929 | /** |
7eb584db DR |
3930 | * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device |
3931 | * @hba: per-adapter instance | |
d3e89bac | 3932 | */ |
7eb584db | 3933 | static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) |
d3e89bac | 3934 | { |
7eb584db DR |
3935 | struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; |
3936 | ||
3937 | if (hba->max_pwr_info.is_valid) | |
3938 | return 0; | |
3939 | ||
2349b533 SJ |
3940 | pwr_info->pwr_tx = FAST_MODE; |
3941 | pwr_info->pwr_rx = FAST_MODE; | |
7eb584db | 3942 | pwr_info->hs_rate = PA_HS_MODE_B; |
d3e89bac SJ |
3943 | |
3944 | /* Get the connected lane count */ | |
7eb584db DR |
3945 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), |
3946 | &pwr_info->lane_rx); | |
3947 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
3948 | &pwr_info->lane_tx); | |
3949 | ||
3950 | if (!pwr_info->lane_rx || !pwr_info->lane_tx) { | |
3951 | dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", | |
3952 | __func__, | |
3953 | pwr_info->lane_rx, | |
3954 | pwr_info->lane_tx); | |
3955 | return -EINVAL; | |
3956 | } | |
d3e89bac SJ |
3957 | |
3958 | /* | |
3959 | * First, get the maximum gears of HS speed. | |
3960 | * If a zero value, it means there is no HSGEAR capability. | |
3961 | * Then, get the maximum gears of PWM speed. | |
3962 | */ | |
7eb584db DR |
3963 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); |
3964 | if (!pwr_info->gear_rx) { | |
3965 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), | |
3966 | &pwr_info->gear_rx); | |
3967 | if (!pwr_info->gear_rx) { | |
3968 | dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", | |
3969 | __func__, pwr_info->gear_rx); | |
3970 | return -EINVAL; | |
3971 | } | |
2349b533 | 3972 | pwr_info->pwr_rx = SLOW_MODE; |
d3e89bac SJ |
3973 | } |
3974 | ||
7eb584db DR |
3975 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), |
3976 | &pwr_info->gear_tx); | |
3977 | if (!pwr_info->gear_tx) { | |
d3e89bac | 3978 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), |
7eb584db DR |
3979 | &pwr_info->gear_tx); |
3980 | if (!pwr_info->gear_tx) { | |
3981 | dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", | |
3982 | __func__, pwr_info->gear_tx); | |
3983 | return -EINVAL; | |
3984 | } | |
2349b533 | 3985 | pwr_info->pwr_tx = SLOW_MODE; |
7eb584db DR |
3986 | } |
3987 | ||
3988 | hba->max_pwr_info.is_valid = true; | |
3989 | return 0; | |
3990 | } | |
3991 | ||
3992 | static int ufshcd_change_power_mode(struct ufs_hba *hba, | |
3993 | struct ufs_pa_layer_attr *pwr_mode) | |
3994 | { | |
3995 | int ret; | |
3996 | ||
3997 | /* if already configured to the requested pwr_mode */ | |
3998 | if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && | |
3999 | pwr_mode->gear_tx == hba->pwr_info.gear_tx && | |
4000 | pwr_mode->lane_rx == hba->pwr_info.lane_rx && | |
4001 | pwr_mode->lane_tx == hba->pwr_info.lane_tx && | |
4002 | pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && | |
4003 | pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && | |
4004 | pwr_mode->hs_rate == hba->pwr_info.hs_rate) { | |
4005 | dev_dbg(hba->dev, "%s: power already configured\n", __func__); | |
4006 | return 0; | |
d3e89bac SJ |
4007 | } |
4008 | ||
4009 | /* | |
4010 | * Configure the following attributes for the power mode change: | |
4011 | * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, | |
4012 | * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, | |
4013 | * - PA_HSSERIES | |
4014 | */ | |
7eb584db DR |
4015 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); |
4016 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), | |
4017 | pwr_mode->lane_rx); | |
4018 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || | |
4019 | pwr_mode->pwr_rx == FAST_MODE) | |
d3e89bac | 4020 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); |
7eb584db DR |
4021 | else |
4022 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); | |
d3e89bac | 4023 | |
7eb584db DR |
4024 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); |
4025 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), | |
4026 | pwr_mode->lane_tx); | |
4027 | if (pwr_mode->pwr_tx == FASTAUTO_MODE || | |
4028 | pwr_mode->pwr_tx == FAST_MODE) | |
d3e89bac | 4029 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); |
7eb584db DR |
4030 | else |
4031 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); | |
d3e89bac | 4032 | |
7eb584db DR |
4033 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || |
4034 | pwr_mode->pwr_tx == FASTAUTO_MODE || | |
4035 | pwr_mode->pwr_rx == FAST_MODE || | |
4036 | pwr_mode->pwr_tx == FAST_MODE) | |
4037 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), | |
4038 | pwr_mode->hs_rate); | |
d3e89bac | 4039 | |
7eb584db DR |
4040 | ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
4041 | | pwr_mode->pwr_tx); | |
4042 | ||
4043 | if (ret) { | |
d3e89bac | 4044 | dev_err(hba->dev, |
7eb584db DR |
4045 | "%s: power mode change failed %d\n", __func__, ret); |
4046 | } else { | |
0263bcd0 YG |
4047 | ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, |
4048 | pwr_mode); | |
7eb584db DR |
4049 | |
4050 | memcpy(&hba->pwr_info, pwr_mode, | |
4051 | sizeof(struct ufs_pa_layer_attr)); | |
4052 | } | |
4053 | ||
4054 | return ret; | |
4055 | } | |
4056 | ||
4057 | /** | |
4058 | * ufshcd_config_pwr_mode - configure a new power mode | |
4059 | * @hba: per-adapter instance | |
4060 | * @desired_pwr_mode: desired power configuration | |
4061 | */ | |
0d846e70 | 4062 | int ufshcd_config_pwr_mode(struct ufs_hba *hba, |
7eb584db DR |
4063 | struct ufs_pa_layer_attr *desired_pwr_mode) |
4064 | { | |
4065 | struct ufs_pa_layer_attr final_params = { 0 }; | |
4066 | int ret; | |
4067 | ||
0263bcd0 YG |
4068 | ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, |
4069 | desired_pwr_mode, &final_params); | |
4070 | ||
4071 | if (ret) | |
7eb584db DR |
4072 | memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); |
4073 | ||
4074 | ret = ufshcd_change_power_mode(hba, &final_params); | |
a3cd5ec5 SJ |
4075 | if (!ret) |
4076 | ufshcd_print_pwr_info(hba); | |
d3e89bac SJ |
4077 | |
4078 | return ret; | |
4079 | } | |
0d846e70 | 4080 | EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); |
d3e89bac | 4081 | |
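/*
 * Illustrative sketch, not part of the driver: once ufshcd_get_max_pwr_mode()
 * has filled hba->max_pwr_info with the capabilities negotiated at link
 * startup, the link can be switched to that maximum through
 * ufshcd_config_pwr_mode(). The helper name is an assumption for the example.
 */
static int __maybe_unused ufshcd_example_use_max_pwr_mode(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_get_max_pwr_mode(hba);
	if (ret) {
		dev_err(hba->dev,
			"%s: failed getting max supported power mode\n",
			__func__);
		return ret;
	}

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}
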
68078d5c DR |
4082 | /** |
4083 | * ufshcd_complete_dev_init() - checks device readiness | |
8aa29f19 | 4084 | * @hba: per-adapter instance |
68078d5c DR |
4085 | * |
4086 | * Set fDeviceInit flag and poll until device toggles it. | |
4087 | */ | |
4088 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) | |
4089 | { | |
dc3c8d3a YG |
4090 | int i; |
4091 | int err; | |
68078d5c DR |
4092 | bool flag_res = 1; |
4093 | ||
dc3c8d3a YG |
4094 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, |
4095 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); | |
68078d5c DR |
4096 | if (err) { |
4097 | dev_err(hba->dev, | |
4098 | "%s setting fDeviceInit flag failed with error %d\n", | |
4099 | __func__, err); | |
4100 | goto out; | |
4101 | } | |
4102 | ||
dc3c8d3a YG |
4103 | /* poll for max. 1000 iterations for fDeviceInit flag to clear */ |
4104 | for (i = 0; i < 1000 && !err && flag_res; i++) | |
4105 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, | |
4106 | QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); | |
4107 | ||
68078d5c DR |
4108 | if (err) |
4109 | dev_err(hba->dev, | |
4110 | "%s reading fDeviceInit flag failed with error %d\n", | |
4111 | __func__, err); | |
4112 | else if (flag_res) | |
4113 | dev_err(hba->dev, | |
4114 | "%s fDeviceInit was not cleared by the device\n", | |
4115 | __func__); | |
4116 | ||
4117 | out: | |
4118 | return err; | |
4119 | } | |
4120 | ||
7a3e97b0 SY |
4121 | /** |
4122 | * ufshcd_make_hba_operational - Make UFS controller operational | |
4123 | * @hba: per adapter instance | |
4124 | * | |
4125 | * To bring UFS host controller to operational state, | |
5c0c28a8 SRT |
4126 | * 1. Enable required interrupts |
4127 | * 2. Configure interrupt aggregation | |
897efe62 | 4128 | * 3. Program UTRL and UTMRL base address |
5c0c28a8 | 4129 | * 4. Configure run-stop-registers |
7a3e97b0 SY |
4130 | * |
4131 | * Returns 0 on success, non-zero value on failure | |
4132 | */ | |
4133 | static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |
4134 | { | |
4135 | int err = 0; | |
4136 | u32 reg; | |
4137 | ||
6ccf44fe SJ |
4138 | /* Enable required interrupts */ |
4139 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); | |
4140 | ||
4141 | /* Configure interrupt aggregation */ | |
b852190e YG |
4142 | if (ufshcd_is_intr_aggr_allowed(hba)) |
4143 | ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); | |
4144 | else | |
4145 | ufshcd_disable_intr_aggr(hba); | |
6ccf44fe SJ |
4146 | |
4147 | /* Configure UTRL and UTMRL base address registers */ | |
4148 | ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), | |
4149 | REG_UTP_TRANSFER_REQ_LIST_BASE_L); | |
4150 | ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), | |
4151 | REG_UTP_TRANSFER_REQ_LIST_BASE_H); | |
4152 | ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), | |
4153 | REG_UTP_TASK_REQ_LIST_BASE_L); | |
4154 | ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), | |
4155 | REG_UTP_TASK_REQ_LIST_BASE_H); | |
4156 | ||
897efe62 YG |
4157 | /* |
4158 | * Make sure base address and interrupt setup are updated before | |
4159 | * enabling the run/stop registers below. | |
4160 | */ | |
4161 | wmb(); | |
4162 | ||
7a3e97b0 SY |
4163 | /* |
4164 | * UCRDY, UTMRLDY and UTRLRDY bits must be 1 | |
7a3e97b0 | 4165 | */ |
5c0c28a8 | 4166 | reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); |
7a3e97b0 SY |
4167 | if (!(ufshcd_get_lists_status(reg))) { |
4168 | ufshcd_enable_run_stop_reg(hba); | |
4169 | } else { | |
3b1d0580 | 4170 | dev_err(hba->dev, |
7a3e97b0 SY |
4171 | "Host controller not ready to process requests"); |
4172 | err = -EIO; | |
4173 | goto out; | |
4174 | } | |
4175 | ||
7a3e97b0 SY |
4176 | out: |
4177 | return err; | |
4178 | } | |
4179 | ||
596585a2 YG |
4180 | /** |
4181 | * ufshcd_hba_stop - Send controller to reset state | |
4182 | * @hba: per adapter instance | |
4183 | * @can_sleep: perform sleep or just spin | |
4184 | */ | |
4185 | static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) | |
4186 | { | |
4187 | int err; | |
4188 | ||
4189 | ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); | |
4190 | err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, | |
4191 | CONTROLLER_ENABLE, CONTROLLER_DISABLE, | |
4192 | 10, 1, can_sleep); | |
4193 | if (err) | |
4194 | dev_err(hba->dev, "%s: Controller disable failed\n", __func__); | |
4195 | } | |
4196 | ||
7a3e97b0 | 4197 | /** |
4404c5de | 4198 | * ufshcd_hba_execute_hce - initialize the controller |
7a3e97b0 SY |
4199 | * @hba: per adapter instance |
4200 | * | |
4201 | * The controller resets itself and controller firmware initialization | |
4202 | * sequence kicks off. When controller is ready it will set | |
4203 | * the Host Controller Enable bit to 1. | |
4204 | * | |
4205 | * Returns 0 on success, non-zero value on failure | |
4206 | */ | |
4404c5de | 4207 | static int ufshcd_hba_execute_hce(struct ufs_hba *hba) |
7a3e97b0 SY |
4208 | { |
4209 | int retry; | |
4210 | ||
4211 | /* | |
4212 | * msleep of 1 and 5 used in this function might result in msleep(20), | |
4213 | * but it was necessary to send the UFS FPGA to reset mode during | |
4214 | * development and testing of this driver. msleep can be changed to | |
4215 | * mdelay and retry count can be reduced based on the controller. | |
4216 | */ | |
596585a2 | 4217 | if (!ufshcd_is_hba_active(hba)) |
7a3e97b0 | 4218 | /* change controller state to "reset state" */ |
596585a2 | 4219 | ufshcd_hba_stop(hba, true); |
7a3e97b0 | 4220 | |
57d104c1 SJ |
4221 | /* UniPro link is disabled at this point */ |
4222 | ufshcd_set_link_off(hba); | |
4223 | ||
0263bcd0 | 4224 | ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); |
5c0c28a8 | 4225 | |
7a3e97b0 SY |
4226 | /* start controller initialization sequence */ |
4227 | ufshcd_hba_start(hba); | |
4228 | ||
4229 | /* | |
4230 | * To initialize a UFS host controller HCE bit must be set to 1. | |
4231 | * During initialization the HCE bit value changes from 1->0->1. | |
4232 | * When the host controller completes initialization sequence | |
4233 | * it sets the value of HCE bit to 1. The same HCE bit is read back | |
4234 | * to check if the controller has completed initialization sequence. | |
4235 | * So without this delay the value HCE = 1, set in the previous | |
4236 | * instruction might be read back. | |
4237 | * This delay can be changed based on the controller. | |
4238 | */ | |
4239 | msleep(1); | |
4240 | ||
4241 | /* wait for the host controller to complete initialization */ | |
4242 | retry = 10; | |
4243 | while (ufshcd_is_hba_active(hba)) { | |
4244 | if (retry) { | |
4245 | retry--; | |
4246 | } else { | |
3b1d0580 | 4247 | dev_err(hba->dev, |
7a3e97b0 SY |
4248 | "Controller enable failed\n"); |
4249 | return -EIO; | |
4250 | } | |
4251 | msleep(5); | |
4252 | } | |
5c0c28a8 | 4253 | |
1d337ec2 | 4254 | /* enable UIC related interrupts */ |
57d104c1 | 4255 | ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); |
1d337ec2 | 4256 | |
0263bcd0 | 4257 | ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); |
5c0c28a8 | 4258 | |
7a3e97b0 SY |
4259 | return 0; |
4260 | } | |
4261 | ||
4404c5de AA |
4262 | static int ufshcd_hba_enable(struct ufs_hba *hba) |
4263 | { | |
4264 | int ret; | |
4265 | ||
4266 | if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { | |
4267 | ufshcd_set_link_off(hba); | |
4268 | ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); | |
4269 | ||
4270 | /* enable UIC related interrupts */ | |
4271 | ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); | |
4272 | ret = ufshcd_dme_reset(hba); | |
4273 | if (!ret) { | |
4274 | ret = ufshcd_dme_enable(hba); | |
4275 | if (!ret) | |
4276 | ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); | |
4277 | if (ret) | |
4278 | dev_err(hba->dev, | |
4279 | "Host controller enable failed with non-hce\n"); | |
4280 | } | |
4281 | } else { | |
4282 | ret = ufshcd_hba_execute_hce(hba); | |
4283 | } | |
4284 | ||
4285 | return ret; | |
4286 | } | |
7ca38cf3 YG |
4287 | static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) |
4288 | { | |
4289 | int tx_lanes, i, err = 0; | |
4290 | ||
4291 | if (!peer) | |
4292 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
4293 | &tx_lanes); | |
4294 | else | |
4295 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
4296 | &tx_lanes); | |
4297 | for (i = 0; i < tx_lanes; i++) { | |
4298 | if (!peer) | |
4299 | err = ufshcd_dme_set(hba, | |
4300 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
4301 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
4302 | 0); | |
4303 | else | |
4304 | err = ufshcd_dme_peer_set(hba, | |
4305 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
4306 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
4307 | 0); | |
4308 | if (err) { | |
4309 | dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", | |
4310 | __func__, peer, i, err); | |
4311 | break; | |
4312 | } | |
4313 | } | |
4314 | ||
4315 | return err; | |
4316 | } | |
4317 | ||
4318 | static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) | |
4319 | { | |
4320 | return ufshcd_disable_tx_lcc(hba, true); | |
4321 | } | |
4322 | ||
7a3e97b0 | 4323 | /** |
6ccf44fe | 4324 | * ufshcd_link_startup - Initialize unipro link startup |
7a3e97b0 SY |
4325 | * @hba: per adapter instance |
4326 | * | |
6ccf44fe | 4327 | * Returns 0 for success, non-zero in case of failure |
7a3e97b0 | 4328 | */ |
6ccf44fe | 4329 | static int ufshcd_link_startup(struct ufs_hba *hba) |
7a3e97b0 | 4330 | { |
6ccf44fe | 4331 | int ret; |
1d337ec2 | 4332 | int retries = DME_LINKSTARTUP_RETRIES; |
7caf489b | 4333 | bool link_startup_again = false; |
7a3e97b0 | 4334 | |
7caf489b SJ |
4335 | /* |
4336 | * If UFS device isn't active then we will have to issue link startup | |
4337 | * 2 times to make sure the device state move to active. | |
4338 | */ | |
4339 | if (!ufshcd_is_ufs_dev_active(hba)) | |
4340 | link_startup_again = true; | |
7a3e97b0 | 4341 | |
7caf489b | 4342 | link_startup: |
1d337ec2 | 4343 | do { |
0263bcd0 | 4344 | ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); |
6ccf44fe | 4345 | |
1d337ec2 | 4346 | ret = ufshcd_dme_link_startup(hba); |
5c0c28a8 | 4347 | |
1d337ec2 SRT |
4348 | /* check if device is detected by inter-connect layer */ |
4349 | if (!ret && !ufshcd_is_device_present(hba)) { | |
4350 | dev_err(hba->dev, "%s: Device not present\n", __func__); | |
4351 | ret = -ENXIO; | |
4352 | goto out; | |
4353 | } | |
6ccf44fe | 4354 | |
1d337ec2 SRT |
4355 | /* |
4356 | * DME link lost indication is only received when link is up, | |
4357 | * but we can't be sure if the link is up until link startup | |
4358 | * succeeds. So reset the local Uni-Pro and try again. | |
4359 | */ | |
4360 | if (ret && ufshcd_hba_enable(hba)) | |
4361 | goto out; | |
4362 | } while (ret && retries--); | |
4363 | ||
4364 | if (ret) | |
4365 | /* failed to get the link up... retire */ | |
5c0c28a8 | 4366 | goto out; |
5c0c28a8 | 4367 | |
7caf489b SJ |
4368 | if (link_startup_again) { |
4369 | link_startup_again = false; | |
4370 | retries = DME_LINKSTARTUP_RETRIES; | |
4371 | goto link_startup; | |
4372 | } | |
4373 | ||
d2aebb9b SJ |
4374 | /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */ |
4375 | ufshcd_init_pwr_info(hba); | |
4376 | ufshcd_print_pwr_info(hba); | |
4377 | ||
7ca38cf3 YG |
4378 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { |
4379 | ret = ufshcd_disable_device_tx_lcc(hba); | |
4380 | if (ret) | |
4381 | goto out; | |
4382 | } | |
4383 | ||
5c0c28a8 | 4384 | /* Include any host controller configuration via UIC commands */ |
0263bcd0 YG |
4385 | ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); |
4386 | if (ret) | |
4387 | goto out; | |
7a3e97b0 | 4388 | |
5c0c28a8 | 4389 | ret = ufshcd_make_hba_operational(hba); |
6ccf44fe | 4390 | out: |
7942f7b5 | 4391 | if (ret) { |
6ccf44fe | 4392 | dev_err(hba->dev, "link startup failed %d\n", ret); |
7942f7b5 VG |
4393 | ufshcd_print_host_state(hba); |
4394 | ufshcd_print_pwr_info(hba); | |
4395 | ufshcd_print_host_regs(hba); | |
4396 | } | |
6ccf44fe | 4397 | return ret; |
7a3e97b0 SY |
4398 | } |
4399 | ||
5a0b0cb9 SRT |
4400 | /** |
4401 | * ufshcd_verify_dev_init() - Verify device initialization | |
4402 | * @hba: per-adapter instance | |
4403 | * | |
4404 | * Send NOP OUT UPIU and wait for NOP IN response to check whether the | |
4405 | * device Transport Protocol (UTP) layer is ready after a reset. | |
4406 | * If the UTP layer at the device side is not initialized, it may | |
4407 | * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT | |
4408 | * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. | |
4409 | */ | |
4410 | static int ufshcd_verify_dev_init(struct ufs_hba *hba) | |
4411 | { | |
4412 | int err = 0; | |
4413 | int retries; | |
4414 | ||
1ab27c9c | 4415 | ufshcd_hold(hba, false); |
5a0b0cb9 SRT |
4416 | mutex_lock(&hba->dev_cmd.lock); |
4417 | for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { | |
4418 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, | |
4419 | NOP_OUT_TIMEOUT); | |
4420 | ||
4421 | if (!err || err == -ETIMEDOUT) | |
4422 | break; | |
4423 | ||
4424 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); | |
4425 | } | |
4426 | mutex_unlock(&hba->dev_cmd.lock); | |
1ab27c9c | 4427 | ufshcd_release(hba); |
5a0b0cb9 SRT |
4428 | |
4429 | if (err) | |
4430 | dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); | |
4431 | return err; | |
4432 | } | |
4433 | ||
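/*
 * Editor's note: a minimal, self-contained sketch of the bounded-retry
 * pattern ufshcd_verify_dev_init() uses above. The issue_nop_out() helper
 * is hypothetical and stands in for ufshcd_exec_dev_cmd(); it is not part
 * of this driver.
 */
#include <errno.h>

#define NOP_RETRIES	10	/* mirrors NOP_OUT_RETRIES */

extern int issue_nop_out(void *hba, int timeout_ms);	/* hypothetical */

static int verify_dev_init_sketch(void *hba)
{
	int err = 0;
	int retries;

	for (retries = NOP_RETRIES; retries > 0; retries--) {
		err = issue_nop_out(hba, 30);
		/* stop on success or on a hard timeout; retry anything else */
		if (!err || err == -ETIMEDOUT)
			break;
	}
	return err;
}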
0ce147d4 SJ |
4434 | /** |
4435 | * ufshcd_set_queue_depth - set lun queue depth | |
4436 | * @sdev: pointer to SCSI device | |
4437 | * | |
4438 | * Read bLUQueueDepth value and activate scsi tagged command | |
4439 | * queueing. For WLUN, queue depth is set to 1. For best-effort | |
4440 | * cases (bLUQueueDepth = 0) the queue depth is set to the maximum |
4441 | * value that the host can queue. |
4442 | */ | |
4443 | static void ufshcd_set_queue_depth(struct scsi_device *sdev) | |
4444 | { | |
4445 | int ret = 0; | |
4446 | u8 lun_qdepth; | |
4447 | struct ufs_hba *hba; | |
4448 | ||
4449 | hba = shost_priv(sdev->host); | |
4450 | ||
4451 | lun_qdepth = hba->nutrs; | |
dbd34a61 SM |
4452 | ret = ufshcd_read_unit_desc_param(hba, |
4453 | ufshcd_scsi_to_upiu_lun(sdev->lun), | |
4454 | UNIT_DESC_PARAM_LU_Q_DEPTH, | |
4455 | &lun_qdepth, | |
4456 | sizeof(lun_qdepth)); | |
0ce147d4 SJ |
4457 | |
4458 | /* Some WLUN doesn't support unit descriptor */ | |
4459 | if (ret == -EOPNOTSUPP) | |
4460 | lun_qdepth = 1; | |
4461 | else if (!lun_qdepth) | |
4462 | /* eventually, we can figure out the real queue depth */ | |
4463 | lun_qdepth = hba->nutrs; | |
4464 | else | |
4465 | lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); | |
4466 | ||
4467 | dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", | |
4468 | __func__, lun_qdepth); | |
db5ed4df | 4469 | scsi_change_queue_depth(sdev, lun_qdepth); |
0ce147d4 SJ |
4470 | } |
4471 | ||
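/*
 * Editor's note: the queue-depth policy above reduces to a small clamp.
 * A hedged sketch of just that decision; pick_lun_qdepth() is illustrative
 * only and not a driver function.
 */
#include <errno.h>

static unsigned int pick_lun_qdepth(int read_ret, unsigned int reported,
				    unsigned int host_max)
{
	if (read_ret == -EOPNOTSUPP)	/* W-LUN without a unit descriptor */
		return 1;
	if (read_ret || reported == 0)	/* best effort: fall back to host max */
		return host_max;
	return reported < host_max ? reported : host_max;
}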
57d104c1 SJ |
4472 | /* |
4473 | * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR | |
4474 | * @hba: per-adapter instance | |
4475 | * @lun: UFS device lun id | |
4476 | * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info | |
4477 | * | |
4478 | * Returns 0 in case of success and the b_lu_write_protect status is returned |
4479 | * in the @b_lu_write_protect parameter. |
4480 | * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. | |
4481 | * Returns -EINVAL in case of invalid parameters passed to this function. | |
4482 | */ | |
4483 | static int ufshcd_get_lu_wp(struct ufs_hba *hba, | |
4484 | u8 lun, | |
4485 | u8 *b_lu_write_protect) | |
4486 | { | |
4487 | int ret; | |
4488 | ||
4489 | if (!b_lu_write_protect) | |
4490 | ret = -EINVAL; | |
4491 | /* | |
4492 | * According to UFS device spec, RPMB LU can't be write | |
4493 | * protected so skip reading bLUWriteProtect parameter for | |
4494 | * it. For other W-LUs, UNIT DESCRIPTOR is not available. | |
4495 | */ | |
4496 | else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) | |
4497 | ret = -ENOTSUPP; | |
4498 | else | |
4499 | ret = ufshcd_read_unit_desc_param(hba, | |
4500 | lun, | |
4501 | UNIT_DESC_PARAM_LU_WR_PROTECT, | |
4502 | b_lu_write_protect, | |
4503 | sizeof(*b_lu_write_protect)); | |
4504 | return ret; | |
4505 | } | |
4506 | ||
4507 | /** | |
4508 | * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect | |
4509 | * status | |
4510 | * @hba: per-adapter instance | |
4511 | * @sdev: pointer to SCSI device | |
4512 | * | |
4513 | */ | |
4514 | static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, | |
4515 | struct scsi_device *sdev) | |
4516 | { | |
4517 | if (hba->dev_info.f_power_on_wp_en && | |
4518 | !hba->dev_info.is_lu_power_on_wp) { | |
4519 | u8 b_lu_write_protect; | |
4520 | ||
4521 | if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), | |
4522 | &b_lu_write_protect) && | |
4523 | (b_lu_write_protect == UFS_LU_POWER_ON_WP)) | |
4524 | hba->dev_info.is_lu_power_on_wp = true; | |
4525 | } | |
4526 | } | |
4527 | ||
7a3e97b0 SY |
4528 | /** |
4529 | * ufshcd_slave_alloc - handle initial SCSI device configurations | |
4530 | * @sdev: pointer to SCSI device | |
4531 | * | |
4532 | * Returns success | |
4533 | */ | |
4534 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | |
4535 | { | |
4536 | struct ufs_hba *hba; | |
4537 | ||
4538 | hba = shost_priv(sdev->host); | |
7a3e97b0 SY |
4539 | |
4540 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ | |
4541 | sdev->use_10_for_ms = 1; | |
7a3e97b0 | 4542 | |
e8e7f271 SRT |
4543 | /* allow SCSI layer to restart the device in case of errors */ |
4544 | sdev->allow_restart = 1; | |
4264fd61 | 4545 | |
b2a6c522 SRT |
4546 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
4547 | sdev->no_report_opcodes = 1; | |
4548 | ||
84af7e8b SRT |
4549 | /* WRITE_SAME command is not supported */ |
4550 | sdev->no_write_same = 1; | |
e8e7f271 | 4551 | |
0ce147d4 | 4552 | ufshcd_set_queue_depth(sdev); |
4264fd61 | 4553 | |
57d104c1 SJ |
4554 | ufshcd_get_lu_power_on_wp_status(hba, sdev); |
4555 | ||
7a3e97b0 SY |
4556 | return 0; |
4557 | } | |
4558 | ||
4264fd61 SRT |
4559 | /** |
4560 | * ufshcd_change_queue_depth - change queue depth | |
4561 | * @sdev: pointer to SCSI device | |
4562 | * @depth: required depth to set | |
4264fd61 | 4563 | * |
db5ed4df | 4564 | * Change queue depth and make sure the max. limits are not crossed. |
4264fd61 | 4565 | */ |
db5ed4df | 4566 | static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) |
4264fd61 SRT |
4567 | { |
4568 | struct ufs_hba *hba = shost_priv(sdev->host); | |
4569 | ||
4570 | if (depth > hba->nutrs) | |
4571 | depth = hba->nutrs; | |
db5ed4df | 4572 | return scsi_change_queue_depth(sdev, depth); |
4264fd61 SRT |
4573 | } |
4574 | ||
eeda4749 AM |
4575 | /** |
4576 | * ufshcd_slave_configure - adjust SCSI device configurations | |
4577 | * @sdev: pointer to SCSI device | |
4578 | */ | |
4579 | static int ufshcd_slave_configure(struct scsi_device *sdev) | |
4580 | { | |
4581 | struct request_queue *q = sdev->request_queue; | |
4582 | ||
4583 | blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); | |
4584 | blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX); | |
4585 | ||
4586 | return 0; | |
4587 | } | |
4588 | ||
7a3e97b0 SY |
4589 | /** |
4590 | * ufshcd_slave_destroy - remove SCSI device configurations | |
4591 | * @sdev: pointer to SCSI device | |
4592 | */ | |
4593 | static void ufshcd_slave_destroy(struct scsi_device *sdev) | |
4594 | { | |
4595 | struct ufs_hba *hba; | |
4596 | ||
4597 | hba = shost_priv(sdev->host); | |
0ce147d4 | 4598 | /* Drop the reference as it won't be needed anymore */ |
7c48bfd0 AM |
4599 | if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { |
4600 | unsigned long flags; | |
4601 | ||
4602 | spin_lock_irqsave(hba->host->host_lock, flags); | |
0ce147d4 | 4603 | hba->sdev_ufs_device = NULL; |
7c48bfd0 AM |
4604 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
4605 | } | |
7a3e97b0 SY |
4606 | } |
4607 | ||
7a3e97b0 SY |
4608 | /** |
4609 | * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status | |
8aa29f19 | 4610 | * @lrbp: pointer to local reference block of completed command |
7a3e97b0 SY |
4611 | * @scsi_status: SCSI command status |
4612 | * | |
4613 | * Returns value based on SCSI command status |
4614 | */ | |
4615 | static inline int | |
4616 | ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) | |
4617 | { | |
4618 | int result = 0; | |
4619 | ||
4620 | switch (scsi_status) { | |
7a3e97b0 | 4621 | case SAM_STAT_CHECK_CONDITION: |
1c2623c5 SJ |
4622 | ufshcd_copy_sense_data(lrbp); |
4623 | case SAM_STAT_GOOD: | |
7a3e97b0 SY |
4624 | result |= DID_OK << 16 | |
4625 | COMMAND_COMPLETE << 8 | | |
1c2623c5 | 4626 | scsi_status; |
7a3e97b0 SY |
4627 | break; |
4628 | case SAM_STAT_TASK_SET_FULL: | |
1c2623c5 | 4629 | case SAM_STAT_BUSY: |
7a3e97b0 | 4630 | case SAM_STAT_TASK_ABORTED: |
1c2623c5 SJ |
4631 | ufshcd_copy_sense_data(lrbp); |
4632 | result |= scsi_status; | |
7a3e97b0 SY |
4633 | break; |
4634 | default: | |
4635 | result |= DID_ERROR << 16; | |
4636 | break; | |
4637 | } /* end of switch */ | |
4638 | ||
4639 | return result; | |
4640 | } | |
4641 | ||
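/*
 * Editor's note: the result word built above follows the SCSI mid-layer
 * layout - host byte in bits 23:16, message byte in bits 15:8, SCSI status
 * in bits 7:0. A standalone illustration of that packing (0x00 is the value
 * of both DID_OK and COMMAND_COMPLETE in the SCSI headers).
 */
#include <stdint.h>

static inline uint32_t pack_scsi_result(uint8_t host_byte, uint8_t msg_byte,
					uint8_t scsi_status)
{
	return (uint32_t)host_byte << 16 | (uint32_t)msg_byte << 8 | scsi_status;
}

/* pack_scsi_result(0x00, 0x00, scsi_status) reproduces the
 * "DID_OK << 16 | COMMAND_COMPLETE << 8 | scsi_status" expression above. */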
4642 | /** | |
4643 | * ufshcd_transfer_rsp_status - Get overall status of the response | |
4644 | * @hba: per adapter instance | |
8aa29f19 | 4645 | * @lrbp: pointer to local reference block of completed command |
7a3e97b0 SY |
4646 | * |
4647 | * Returns result of the command to notify SCSI midlayer | |
4648 | */ | |
4649 | static inline int | |
4650 | ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
4651 | { | |
4652 | int result = 0; | |
4653 | int scsi_status; | |
4654 | int ocs; | |
4655 | ||
4656 | /* overall command status of utrd */ | |
4657 | ocs = ufshcd_get_tr_ocs(lrbp); | |
4658 | ||
4659 | switch (ocs) { | |
4660 | case OCS_SUCCESS: | |
5a0b0cb9 | 4661 | result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); |
ff8e20c6 | 4662 | hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); |
5a0b0cb9 SRT |
4663 | switch (result) { |
4664 | case UPIU_TRANSACTION_RESPONSE: | |
4665 | /* | |
4666 | * get the response UPIU result to extract | |
4667 | * the SCSI command status | |
4668 | */ | |
4669 | result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); | |
4670 | ||
4671 | /* | |
4672 | * get the result based on SCSI status response | |
4673 | * to notify the SCSI midlayer of the command status | |
4674 | */ | |
4675 | scsi_status = result & MASK_SCSI_STATUS; | |
4676 | result = ufshcd_scsi_cmd_status(lrbp, scsi_status); | |
66ec6d59 | 4677 | |
f05ac2e5 YG |
4678 | /* |
4679 | * Currently we are only supporting BKOPs exception | |
4680 | * events hence we can ignore BKOPs exception event | |
4681 | * during power management callbacks. BKOPs exception | |
4682 | * event is not expected to be raised in runtime suspend | |
4683 | * callback as it allows the urgent bkops. | |
4684 | * During system suspend, we are anyway forcefully | |
4685 | * disabling the bkops and if urgent bkops is needed | |
4686 | * it will be enabled on system resume. Long term | |
4687 | * solution could be to abort the system suspend if | |
4688 | * UFS device needs urgent BKOPs. | |
4689 | */ | |
4690 | if (!hba->pm_op_in_progress && | |
4691 | ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) | |
66ec6d59 | 4692 | schedule_work(&hba->eeh_work); |
5a0b0cb9 SRT |
4693 | break; |
4694 | case UPIU_TRANSACTION_REJECT_UPIU: | |
4695 | /* TODO: handle Reject UPIU Response */ | |
4696 | result = DID_ERROR << 16; | |
3b1d0580 | 4697 | dev_err(hba->dev, |
5a0b0cb9 SRT |
4698 | "Reject UPIU not fully implemented\n"); |
4699 | break; | |
4700 | default: | |
4701 | result = DID_ERROR << 16; | |
4702 | dev_err(hba->dev, | |
4703 | "Unexpected request response code = %x\n", | |
4704 | result); | |
7a3e97b0 SY |
4705 | break; |
4706 | } | |
7a3e97b0 SY |
4707 | break; |
4708 | case OCS_ABORTED: | |
4709 | result |= DID_ABORT << 16; | |
4710 | break; | |
e8e7f271 SRT |
4711 | case OCS_INVALID_COMMAND_STATUS: |
4712 | result |= DID_REQUEUE << 16; | |
4713 | break; | |
7a3e97b0 SY |
4714 | case OCS_INVALID_CMD_TABLE_ATTR: |
4715 | case OCS_INVALID_PRDT_ATTR: | |
4716 | case OCS_MISMATCH_DATA_BUF_SIZE: | |
4717 | case OCS_MISMATCH_RESP_UPIU_SIZE: | |
4718 | case OCS_PEER_COMM_FAILURE: | |
4719 | case OCS_FATAL_ERROR: | |
4720 | default: | |
4721 | result |= DID_ERROR << 16; | |
3b1d0580 | 4722 | dev_err(hba->dev, |
ff8e20c6 DR |
4723 | "OCS error from controller = %x for tag %d\n", |
4724 | ocs, lrbp->task_tag); | |
4725 | ufshcd_print_host_regs(hba); | |
6ba65588 | 4726 | ufshcd_print_host_state(hba); |
7a3e97b0 SY |
4727 | break; |
4728 | } /* end of switch */ | |
4729 | ||
66cc820f DR |
4730 | if (host_byte(result) != DID_OK) |
4731 | ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); | |
7a3e97b0 SY |
4732 | return result; |
4733 | } | |
4734 | ||
6ccf44fe SJ |
4735 | /** |
4736 | * ufshcd_uic_cmd_compl - handle completion of uic command | |
4737 | * @hba: per adapter instance | |
53b3d9c3 | 4738 | * @intr_status: interrupt status generated by the controller |
6ccf44fe | 4739 | */ |
53b3d9c3 | 4740 | static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) |
6ccf44fe | 4741 | { |
53b3d9c3 | 4742 | if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { |
6ccf44fe SJ |
4743 | hba->active_uic_cmd->argument2 |= |
4744 | ufshcd_get_uic_cmd_result(hba); | |
12b4fdb4 SJ |
4745 | hba->active_uic_cmd->argument3 = |
4746 | ufshcd_get_dme_attr_val(hba); | |
6ccf44fe SJ |
4747 | complete(&hba->active_uic_cmd->done); |
4748 | } | |
53b3d9c3 | 4749 | |
57d104c1 SJ |
4750 | if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) |
4751 | complete(hba->uic_async_done); | |
6ccf44fe SJ |
4752 | } |
4753 | ||
7a3e97b0 | 4754 | /** |
9a47ec7c | 4755 | * __ufshcd_transfer_req_compl - handle SCSI and query command completion |
7a3e97b0 | 4756 | * @hba: per adapter instance |
9a47ec7c | 4757 | * @completed_reqs: requests to complete |
7a3e97b0 | 4758 | */ |
9a47ec7c YG |
4759 | static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, |
4760 | unsigned long completed_reqs) | |
7a3e97b0 | 4761 | { |
5a0b0cb9 SRT |
4762 | struct ufshcd_lrb *lrbp; |
4763 | struct scsi_cmnd *cmd; | |
7a3e97b0 SY |
4764 | int result; |
4765 | int index; | |
e9d501b1 | 4766 | |
e9d501b1 DR |
4767 | for_each_set_bit(index, &completed_reqs, hba->nutrs) { |
4768 | lrbp = &hba->lrb[index]; | |
4769 | cmd = lrbp->cmd; | |
4770 | if (cmd) { | |
1a07f2d9 | 4771 | ufshcd_add_command_trace(hba, index, "complete"); |
e9d501b1 DR |
4772 | result = ufshcd_transfer_rsp_status(hba, lrbp); |
4773 | scsi_dma_unmap(cmd); | |
4774 | cmd->result = result; | |
4775 | /* Mark completed command as NULL in LRB */ | |
4776 | lrbp->cmd = NULL; | |
4777 | clear_bit_unlock(index, &hba->lrb_in_use); | |
4778 | /* Do not touch lrbp after scsi done */ | |
4779 | cmd->scsi_done(cmd); | |
1ab27c9c | 4780 | __ufshcd_release(hba); |
300bb13f JP |
4781 | } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || |
4782 | lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { | |
1a07f2d9 LS |
4783 | if (hba->dev_cmd.complete) { |
4784 | ufshcd_add_command_trace(hba, index, | |
4785 | "dev_complete"); | |
e9d501b1 | 4786 | complete(hba->dev_cmd.complete); |
1a07f2d9 | 4787 | } |
e9d501b1 | 4788 | } |
401f1e44 SJ |
4789 | if (ufshcd_is_clkscaling_supported(hba)) |
4790 | hba->clk_scaling.active_reqs--; | |
09017188 ZL |
4791 | |
4792 | lrbp->compl_time_stamp = ktime_get(); | |
e9d501b1 | 4793 | } |
7a3e97b0 SY |
4794 | |
4795 | /* clear corresponding bits of completed commands */ | |
4796 | hba->outstanding_reqs ^= completed_reqs; | |
4797 | ||
856b3483 ST |
4798 | ufshcd_clk_scaling_update_busy(hba); |
4799 | ||
5a0b0cb9 SRT |
4800 | /* we might have free'd some tags above */ |
4801 | wake_up(&hba->dev_cmd.tag_wq); | |
7a3e97b0 SY |
4802 | } |
4803 | ||
9a47ec7c YG |
4804 | /** |
4805 | * ufshcd_transfer_req_compl - handle SCSI and query command completion | |
4806 | * @hba: per adapter instance | |
4807 | */ | |
4808 | static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |
4809 | { | |
4810 | unsigned long completed_reqs; | |
4811 | u32 tr_doorbell; | |
4812 | ||
4813 | /* Resetting interrupt aggregation counters first and reading the | |
4814 | * DOOR_BELL afterward allows us to handle all the completed requests. | |
4815 | * To prevent starvation of other interrupts, the DB is read once |
4816 | * after reset. The downside of this solution is the possibility of |
4817 | * a false interrupt if the device completes another request after |
4818 | * resetting aggregation and before reading the DB. |
4819 | */ | |
5ac6abc9 AA |
4820 | if (ufshcd_is_intr_aggr_allowed(hba) && |
4821 | !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) | |
9a47ec7c YG |
4822 | ufshcd_reset_intr_aggr(hba); |
4823 | ||
4824 | tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); | |
4825 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; | |
4826 | ||
4827 | __ufshcd_transfer_req_compl(hba, completed_reqs); | |
4828 | } | |
4829 | ||
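/*
 * Editor's note: completed_reqs above works because the doorbell register
 * keeps a 1 for every request still owned by the controller; XOR-ing it with
 * the driver's outstanding mask leaves exactly the freshly completed tags.
 * A small user-space illustration of that bit arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t outstanding = 0x2f;	/* tags 0-3 and 5 were issued */
	uint32_t doorbell    = 0x21;	/* tags 0 and 5 still pending */
	uint32_t completed   = doorbell ^ outstanding;

	printf("completed:   0x%08x\n", completed);	/* 0x0000000e -> tags 1-3 */

	outstanding ^= completed;	/* mirrors "hba->outstanding_reqs ^= completed_reqs" */
	printf("outstanding: 0x%08x\n", outstanding);	/* back to 0x00000021 */
	return 0;
}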
66ec6d59 SRT |
4830 | /** |
4831 | * ufshcd_disable_ee - disable exception event | |
4832 | * @hba: per-adapter instance | |
4833 | * @mask: exception event to disable | |
4834 | * | |
4835 | * Disables exception event in the device so that the EVENT_ALERT | |
4836 | * bit is not set. | |
4837 | * | |
4838 | * Returns zero on success, non-zero error value on failure. | |
4839 | */ | |
4840 | static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) | |
4841 | { | |
4842 | int err = 0; | |
4843 | u32 val; | |
4844 | ||
4845 | if (!(hba->ee_ctrl_mask & mask)) | |
4846 | goto out; | |
4847 | ||
4848 | val = hba->ee_ctrl_mask & ~mask; | |
d7e2ddd5 | 4849 | val &= MASK_EE_STATUS; |
5e86ae44 | 4850 | err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
66ec6d59 SRT |
4851 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); |
4852 | if (!err) | |
4853 | hba->ee_ctrl_mask &= ~mask; | |
4854 | out: | |
4855 | return err; | |
4856 | } | |
4857 | ||
4858 | /** | |
4859 | * ufshcd_enable_ee - enable exception event | |
4860 | * @hba: per-adapter instance | |
4861 | * @mask: exception event to enable | |
4862 | * | |
4863 | * Enable corresponding exception event in the device to allow | |
4864 | * device to alert host in critical scenarios. | |
4865 | * | |
4866 | * Returns zero on success, non-zero error value on failure. | |
4867 | */ | |
4868 | static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) | |
4869 | { | |
4870 | int err = 0; | |
4871 | u32 val; | |
4872 | ||
4873 | if (hba->ee_ctrl_mask & mask) | |
4874 | goto out; | |
4875 | ||
4876 | val = hba->ee_ctrl_mask | mask; | |
d7e2ddd5 | 4877 | val &= MASK_EE_STATUS; |
5e86ae44 | 4878 | err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
66ec6d59 SRT |
4879 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); |
4880 | if (!err) | |
4881 | hba->ee_ctrl_mask |= mask; | |
4882 | out: | |
4883 | return err; | |
4884 | } | |
4885 | ||
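/*
 * Editor's note: ufshcd_disable_ee()/ufshcd_enable_ee() are a single
 * read-modify-write of the cached exception-event mask followed by an
 * attribute write. A hedged sketch of that bookkeeping; write_ee_control()
 * is a hypothetical stand-in for the QUERY_ATTR_IDN_EE_CONTROL write.
 */
#include <stdbool.h>
#include <stdint.h>

#define EE_STATUS_MASK	0xFFFF	/* only the low 16 bits carry event flags */

extern int write_ee_control(uint32_t val);	/* hypothetical */

static int set_ee_bit(uint32_t *ee_ctrl_mask, uint32_t mask, bool enable)
{
	uint32_t val = enable ? (*ee_ctrl_mask | mask) : (*ee_ctrl_mask & ~mask);
	int err;

	val &= EE_STATUS_MASK;
	err = write_ee_control(val);
	if (!err)	/* only cache the new mask once the device accepted it */
		*ee_ctrl_mask = enable ? (*ee_ctrl_mask | mask)
				       : (*ee_ctrl_mask & ~mask);
	return err;
}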
4886 | /** | |
4887 | * ufshcd_enable_auto_bkops - Allow device managed BKOPS | |
4888 | * @hba: per-adapter instance | |
4889 | * | |
4890 | * Allow device to manage background operations on its own. Enabling | |
4891 | * this might lead to inconsistent latencies during normal data transfers | |
4892 | * as the device is allowed to manage its own way of handling background | |
4893 | * operations. | |
4894 | * | |
4895 | * Returns zero on success, non-zero on failure. | |
4896 | */ | |
4897 | static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) | |
4898 | { | |
4899 | int err = 0; | |
4900 | ||
4901 | if (hba->auto_bkops_enabled) | |
4902 | goto out; | |
4903 | ||
dc3c8d3a | 4904 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, |
66ec6d59 SRT |
4905 | QUERY_FLAG_IDN_BKOPS_EN, NULL); |
4906 | if (err) { | |
4907 | dev_err(hba->dev, "%s: failed to enable bkops %d\n", | |
4908 | __func__, err); | |
4909 | goto out; | |
4910 | } | |
4911 | ||
4912 | hba->auto_bkops_enabled = true; | |
7ff5ab47 | 4913 | trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); |
66ec6d59 SRT |
4914 | |
4915 | /* No need of URGENT_BKOPS exception from the device */ | |
4916 | err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); | |
4917 | if (err) | |
4918 | dev_err(hba->dev, "%s: failed to disable exception event %d\n", | |
4919 | __func__, err); | |
4920 | out: | |
4921 | return err; | |
4922 | } | |
4923 | ||
4924 | /** | |
4925 | * ufshcd_disable_auto_bkops - block device in doing background operations | |
4926 | * @hba: per-adapter instance | |
4927 | * | |
4928 | * Disabling background operations improves command response latency but | |
4929 | * has the drawback of the device moving into a critical state where it is |
4930 | * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the |
4931 | * host is idle so that BKOPS are managed effectively without any negative | |
4932 | * impacts. | |
4933 | * | |
4934 | * Returns zero on success, non-zero on failure. | |
4935 | */ | |
4936 | static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) | |
4937 | { | |
4938 | int err = 0; | |
4939 | ||
4940 | if (!hba->auto_bkops_enabled) | |
4941 | goto out; | |
4942 | ||
4943 | /* | |
4944 | * If host assisted BKOPs is to be enabled, make sure | |
4945 | * urgent bkops exception is allowed. | |
4946 | */ | |
4947 | err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); | |
4948 | if (err) { | |
4949 | dev_err(hba->dev, "%s: failed to enable exception event %d\n", | |
4950 | __func__, err); | |
4951 | goto out; | |
4952 | } | |
4953 | ||
dc3c8d3a | 4954 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, |
66ec6d59 SRT |
4955 | QUERY_FLAG_IDN_BKOPS_EN, NULL); |
4956 | if (err) { | |
4957 | dev_err(hba->dev, "%s: failed to disable bkops %d\n", | |
4958 | __func__, err); | |
4959 | ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); | |
4960 | goto out; | |
4961 | } | |
4962 | ||
4963 | hba->auto_bkops_enabled = false; | |
7ff5ab47 | 4964 | trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); |
66ec6d59 SRT |
4965 | out: |
4966 | return err; | |
4967 | } | |
4968 | ||
4969 | /** | |
4e768e76 | 4970 | * ufshcd_force_reset_auto_bkops - force reset auto bkops state |
66ec6d59 SRT |
4971 | * @hba: per adapter instance |
4972 | * | |
4973 | * After a device reset the device may toggle the BKOPS_EN flag | |
4974 | * to its default value. The s/w tracking variables should be updated |
4e768e76 SJ |
4975 | * as well. This function changes the auto-bkops state based on |
4976 | * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND. | |
66ec6d59 | 4977 | */ |
4e768e76 | 4978 | static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) |
66ec6d59 | 4979 | { |
4e768e76 SJ |
4980 | if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { |
4981 | hba->auto_bkops_enabled = false; | |
4982 | hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; | |
4983 | ufshcd_enable_auto_bkops(hba); | |
4984 | } else { | |
4985 | hba->auto_bkops_enabled = true; | |
4986 | hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; | |
4987 | ufshcd_disable_auto_bkops(hba); | |
4988 | } | |
66ec6d59 SRT |
4989 | } |
4990 | ||
4991 | static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) | |
4992 | { | |
5e86ae44 | 4993 | return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, |
66ec6d59 SRT |
4994 | QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); |
4995 | } | |
4996 | ||
4997 | /** | |
57d104c1 | 4998 | * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status |
66ec6d59 | 4999 | * @hba: per-adapter instance |
57d104c1 | 5000 | * @status: bkops_status value |
66ec6d59 | 5001 | * |
57d104c1 SJ |
5002 | * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn |
5003 | * flag in the device to permit background operations if the device | |
5004 | * bkops_status is greater than or equal to "status" argument passed to | |
5005 | * this function, disable otherwise. | |
5006 | * | |
5007 | * Returns 0 for success, non-zero in case of failure. | |
5008 | * | |
5009 | * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag | |
5010 | * to know whether auto bkops is enabled or disabled after this function | |
5011 | * returns control to it. | |
66ec6d59 | 5012 | */ |
57d104c1 SJ |
5013 | static int ufshcd_bkops_ctrl(struct ufs_hba *hba, |
5014 | enum bkops_status status) | |
66ec6d59 SRT |
5015 | { |
5016 | int err; | |
57d104c1 | 5017 | u32 curr_status = 0; |
66ec6d59 | 5018 | |
57d104c1 | 5019 | err = ufshcd_get_bkops_status(hba, &curr_status); |
66ec6d59 SRT |
5020 | if (err) { |
5021 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | |
5022 | __func__, err); | |
5023 | goto out; | |
57d104c1 SJ |
5024 | } else if (curr_status > BKOPS_STATUS_MAX) { |
5025 | dev_err(hba->dev, "%s: invalid BKOPS status %d\n", | |
5026 | __func__, curr_status); | |
5027 | err = -EINVAL; | |
5028 | goto out; | |
66ec6d59 SRT |
5029 | } |
5030 | ||
57d104c1 | 5031 | if (curr_status >= status) |
66ec6d59 | 5032 | err = ufshcd_enable_auto_bkops(hba); |
57d104c1 SJ |
5033 | else |
5034 | err = ufshcd_disable_auto_bkops(hba); | |
66ec6d59 SRT |
5035 | out: |
5036 | return err; | |
5037 | } | |
5038 | ||
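/*
 * Editor's note: the policy in ufshcd_bkops_ctrl() is one comparison - if
 * the device-reported bkops level has reached the requested threshold,
 * auto-bkops is enabled, otherwise it is disabled. Sketch below; the three
 * helpers are hypothetical stand-ins for the query/flag operations.
 */
#include <errno.h>

extern int get_bkops_status(unsigned int *status);	/* hypothetical */
extern int enable_auto_bkops(void);			/* hypothetical */
extern int disable_auto_bkops(void);			/* hypothetical */

#define BKOPS_LEVEL_MAX	3	/* mirrors the driver's BKOPS_STATUS_MAX */

static int bkops_ctrl_sketch(unsigned int threshold)
{
	unsigned int curr = 0;
	int err = get_bkops_status(&curr);

	if (err)
		return err;
	if (curr > BKOPS_LEVEL_MAX)
		return -EINVAL;
	return curr >= threshold ? enable_auto_bkops() : disable_auto_bkops();
}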
57d104c1 SJ |
5039 | /** |
5040 | * ufshcd_urgent_bkops - handle urgent bkops exception event | |
5041 | * @hba: per-adapter instance | |
5042 | * | |
5043 | * Enable fBackgroundOpsEn flag in the device to permit background | |
5044 | * operations. | |
5045 | * | |
5046 | * If BKOPs is enabled, this function returns 0, 1 if bkops is not enabled |
5047 | * and negative error value for any other failure. | |
5048 | */ | |
5049 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | |
5050 | { | |
afdfff59 | 5051 | return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); |
57d104c1 SJ |
5052 | } |
5053 | ||
66ec6d59 SRT |
5054 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) |
5055 | { | |
5e86ae44 | 5056 | return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, |
66ec6d59 SRT |
5057 | QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); |
5058 | } | |
5059 | ||
afdfff59 YG |
5060 | static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) |
5061 | { | |
5062 | int err; | |
5063 | u32 curr_status = 0; | |
5064 | ||
5065 | if (hba->is_urgent_bkops_lvl_checked) | |
5066 | goto enable_auto_bkops; | |
5067 | ||
5068 | err = ufshcd_get_bkops_status(hba, &curr_status); | |
5069 | if (err) { | |
5070 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | |
5071 | __func__, err); | |
5072 | goto out; | |
5073 | } | |
5074 | ||
5075 | /* | |
5076 | * We are seeing that some devices are raising the urgent bkops | |
5077 | * exception events even when BKOPS status doesn't indicate performance |
5078 | * impacted or critical. Handle these devices by determining their urgent |
5079 | * bkops status at runtime. | |
5080 | */ | |
5081 | if (curr_status < BKOPS_STATUS_PERF_IMPACT) { | |
5082 | dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", | |
5083 | __func__, curr_status); | |
5084 | /* update the current status as the urgent bkops level */ | |
5085 | hba->urgent_bkops_lvl = curr_status; | |
5086 | hba->is_urgent_bkops_lvl_checked = true; | |
5087 | } | |
5088 | ||
5089 | enable_auto_bkops: | |
5090 | err = ufshcd_enable_auto_bkops(hba); | |
5091 | out: | |
5092 | if (err < 0) | |
5093 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", | |
5094 | __func__, err); | |
5095 | } | |
5096 | ||
66ec6d59 SRT |
5097 | /** |
5098 | * ufshcd_exception_event_handler - handle exceptions raised by device | |
5099 | * @work: pointer to work data | |
5100 | * | |
5101 | * Read bExceptionEventStatus attribute from the device and handle the | |
5102 | * exception event accordingly. | |
5103 | */ | |
5104 | static void ufshcd_exception_event_handler(struct work_struct *work) | |
5105 | { | |
5106 | struct ufs_hba *hba; | |
5107 | int err; | |
5108 | u32 status = 0; | |
5109 | hba = container_of(work, struct ufs_hba, eeh_work); | |
5110 | ||
62694735 | 5111 | pm_runtime_get_sync(hba->dev); |
2e3611e9 | 5112 | scsi_block_requests(hba->host); |
66ec6d59 SRT |
5113 | err = ufshcd_get_ee_status(hba, &status); |
5114 | if (err) { | |
5115 | dev_err(hba->dev, "%s: failed to get exception status %d\n", | |
5116 | __func__, err); | |
5117 | goto out; | |
5118 | } | |
5119 | ||
5120 | status &= hba->ee_ctrl_mask; | |
afdfff59 YG |
5121 | |
5122 | if (status & MASK_EE_URGENT_BKOPS) | |
5123 | ufshcd_bkops_exception_event_handler(hba); | |
5124 | ||
66ec6d59 | 5125 | out: |
2e3611e9 | 5126 | scsi_unblock_requests(hba->host); |
62694735 | 5127 | pm_runtime_put_sync(hba->dev); |
66ec6d59 SRT |
5128 | return; |
5129 | } | |
5130 | ||
9a47ec7c YG |
5131 | /* Complete requests that have door-bell cleared */ |
5132 | static void ufshcd_complete_requests(struct ufs_hba *hba) | |
5133 | { | |
5134 | ufshcd_transfer_req_compl(hba); | |
5135 | ufshcd_tmc_handler(hba); | |
5136 | } | |
5137 | ||
583fa62d YG |
5138 | /** |
5139 | * ufshcd_quirk_dl_nac_errors - This function checks whether error handling |
5140 | * is required to recover from the DL NAC errors. |
5141 | * @hba: per-adapter instance | |
5142 | * | |
5143 | * Returns true if error handling is required, false otherwise | |
5144 | */ | |
5145 | static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) | |
5146 | { | |
5147 | unsigned long flags; | |
5148 | bool err_handling = true; | |
5149 | ||
5150 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5151 | /* | |
5152 | * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the | |
5153 | * device fatal error and/or DL NAC & REPLAY timeout errors. | |
5154 | */ | |
5155 | if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) | |
5156 | goto out; | |
5157 | ||
5158 | if ((hba->saved_err & DEVICE_FATAL_ERROR) || | |
5159 | ((hba->saved_err & UIC_ERROR) && | |
5160 | (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) | |
5161 | goto out; | |
5162 | ||
5163 | if ((hba->saved_err & UIC_ERROR) && | |
5164 | (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { | |
5165 | int err; | |
5166 | /* | |
5167 | * wait for 50ms to see if we can get any other errors or not. | |
5168 | */ | |
5169 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5170 | msleep(50); | |
5171 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5172 | ||
5173 | /* | |
5174 | * now check if we have got any other severe errors besides the |
5175 | * DL NAC error. |
5176 | */ | |
5177 | if ((hba->saved_err & INT_FATAL_ERRORS) || | |
5178 | ((hba->saved_err & UIC_ERROR) && | |
5179 | (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) | |
5180 | goto out; | |
5181 | ||
5182 | /* | |
5183 | * As DL NAC is the only error received so far, send out NOP | |
5184 | * command to confirm if link is still active or not. | |
5185 | * - If we don't get any response then do error recovery. | |
5186 | * - If we get response then clear the DL NAC error bit. | |
5187 | */ | |
5188 | ||
5189 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5190 | err = ufshcd_verify_dev_init(hba); | |
5191 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5192 | ||
5193 | if (err) | |
5194 | goto out; | |
5195 | ||
5196 | /* Link seems to be alive hence ignore the DL NAC errors */ | |
5197 | if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) | |
5198 | hba->saved_err &= ~UIC_ERROR; | |
5199 | /* clear NAC error */ | |
5200 | hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; | |
5201 | if (!hba->saved_uic_err) { | |
5202 | err_handling = false; | |
5203 | goto out; | |
5204 | } | |
5205 | } | |
5206 | out: | |
5207 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5208 | return err_handling; | |
5209 | } | |
5210 | ||
7a3e97b0 | 5211 | /** |
e8e7f271 SRT |
5212 | * ufshcd_err_handler - handle UFS errors that require s/w attention |
5213 | * @work: pointer to work structure | |
7a3e97b0 | 5214 | */ |
e8e7f271 | 5215 | static void ufshcd_err_handler(struct work_struct *work) |
7a3e97b0 SY |
5216 | { |
5217 | struct ufs_hba *hba; | |
e8e7f271 SRT |
5218 | unsigned long flags; |
5219 | u32 err_xfer = 0; | |
5220 | u32 err_tm = 0; | |
5221 | int err = 0; | |
5222 | int tag; | |
9a47ec7c | 5223 | bool needs_reset = false; |
e8e7f271 SRT |
5224 | |
5225 | hba = container_of(work, struct ufs_hba, eh_work); | |
7a3e97b0 | 5226 | |
62694735 | 5227 | pm_runtime_get_sync(hba->dev); |
1ab27c9c | 5228 | ufshcd_hold(hba, false); |
e8e7f271 SRT |
5229 | |
5230 | spin_lock_irqsave(hba->host->host_lock, flags); | |
9a47ec7c | 5231 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) |
e8e7f271 | 5232 | goto out; |
e8e7f271 SRT |
5233 | |
5234 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
5235 | ufshcd_set_eh_in_progress(hba); | |
5236 | ||
5237 | /* Complete requests that have door-bell cleared by h/w */ | |
9a47ec7c | 5238 | ufshcd_complete_requests(hba); |
583fa62d YG |
5239 | |
5240 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { | |
5241 | bool ret; | |
5242 | ||
5243 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5244 | /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ | |
5245 | ret = ufshcd_quirk_dl_nac_errors(hba); | |
5246 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5247 | if (!ret) | |
5248 | goto skip_err_handling; | |
5249 | } | |
9a47ec7c YG |
5250 | if ((hba->saved_err & INT_FATAL_ERRORS) || |
5251 | ((hba->saved_err & UIC_ERROR) && | |
5252 | (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR | | |
5253 | UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | | |
5254 | UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) | |
5255 | needs_reset = true; | |
e8e7f271 | 5256 | |
9a47ec7c YG |
5257 | /* |
5258 | * if host reset is required then skip clearing the pending | |
5259 | * transfers forcefully because they will automatically get | |
5260 | * cleared after link startup. | |
5261 | */ | |
5262 | if (needs_reset) | |
5263 | goto skip_pending_xfer_clear; | |
5264 | ||
5265 | /* release lock as clear command might sleep */ | |
5266 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
e8e7f271 | 5267 | /* Clear pending transfer requests */ |
9a47ec7c YG |
5268 | for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { |
5269 | if (ufshcd_clear_cmd(hba, tag)) { | |
5270 | err_xfer = true; | |
5271 | goto lock_skip_pending_xfer_clear; | |
5272 | } | |
5273 | } | |
e8e7f271 SRT |
5274 | |
5275 | /* Clear pending task management requests */ | |
9a47ec7c YG |
5276 | for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { |
5277 | if (ufshcd_clear_tm_cmd(hba, tag)) { | |
5278 | err_tm = true; | |
5279 | goto lock_skip_pending_xfer_clear; | |
5280 | } | |
5281 | } | |
e8e7f271 | 5282 | |
9a47ec7c | 5283 | lock_skip_pending_xfer_clear: |
e8e7f271 | 5284 | spin_lock_irqsave(hba->host->host_lock, flags); |
e8e7f271 | 5285 | |
9a47ec7c YG |
5286 | /* Complete the requests that are cleared by s/w */ |
5287 | ufshcd_complete_requests(hba); | |
5288 | ||
5289 | if (err_xfer || err_tm) | |
5290 | needs_reset = true; | |
5291 | ||
5292 | skip_pending_xfer_clear: | |
e8e7f271 | 5293 | /* Fatal errors need reset */ |
9a47ec7c YG |
5294 | if (needs_reset) { |
5295 | unsigned long max_doorbells = (1UL << hba->nutrs) - 1; | |
5296 | ||
5297 | /* | |
5298 | * ufshcd_reset_and_restore() does the link reinitialization | |
5299 | * which will need atleast one empty doorbell slot to send the | |
5300 | * device management commands (NOP and query commands). | |
5301 | * If there is no slot empty at this moment then free up last | |
5302 | * slot forcefully. | |
5303 | */ | |
5304 | if (hba->outstanding_reqs == max_doorbells) | |
5305 | __ufshcd_transfer_req_compl(hba, | |
5306 | (1UL << (hba->nutrs - 1))); | |
5307 | ||
5308 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
e8e7f271 | 5309 | err = ufshcd_reset_and_restore(hba); |
9a47ec7c | 5310 | spin_lock_irqsave(hba->host->host_lock, flags); |
e8e7f271 SRT |
5311 | if (err) { |
5312 | dev_err(hba->dev, "%s: reset and restore failed\n", | |
5313 | __func__); | |
5314 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
5315 | } | |
5316 | /* | |
5317 | * Inform scsi mid-layer that we did reset and allow to handle | |
5318 | * Unit Attention properly. | |
5319 | */ | |
5320 | scsi_report_bus_reset(hba->host, 0); | |
5321 | hba->saved_err = 0; | |
5322 | hba->saved_uic_err = 0; | |
5323 | } | |
9a47ec7c | 5324 | |
583fa62d | 5325 | skip_err_handling: |
9a47ec7c YG |
5326 | if (!needs_reset) { |
5327 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | |
5328 | if (hba->saved_err || hba->saved_uic_err) | |
5329 | dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", | |
5330 | __func__, hba->saved_err, hba->saved_uic_err); | |
5331 | } | |
5332 | ||
e8e7f271 SRT |
5333 | ufshcd_clear_eh_in_progress(hba); |
5334 | ||
5335 | out: | |
9a47ec7c | 5336 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
38135535 | 5337 | ufshcd_scsi_unblock_requests(hba); |
1ab27c9c | 5338 | ufshcd_release(hba); |
62694735 | 5339 | pm_runtime_put_sync(hba->dev); |
7a3e97b0 SY |
5340 | } |
5341 | ||
ff8e20c6 DR |
5342 | static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist, |
5343 | u32 reg) | |
5344 | { | |
5345 | reg_hist->reg[reg_hist->pos] = reg; | |
5346 | reg_hist->tstamp[reg_hist->pos] = ktime_get(); | |
5347 | reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH; | |
5348 | } | |
5349 | ||
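/*
 * Editor's note: ufshcd_update_uic_reg_hist() is a fixed-size ring buffer -
 * each new error value overwrites the oldest slot and the write position
 * wraps around. A standalone sketch of the same structure; the depth of 8
 * here is illustrative.
 */
#include <stdint.h>
#include <time.h>

#define HIST_LEN 8

struct err_hist {
	uint32_t reg[HIST_LEN];
	time_t tstamp[HIST_LEN];
	int pos;
};

static void hist_push(struct err_hist *h, uint32_t reg)
{
	h->reg[h->pos] = reg;			/* overwrite the oldest entry */
	h->tstamp[h->pos] = time(NULL);
	h->pos = (h->pos + 1) % HIST_LEN;	/* wrap the write position */
}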
7a3e97b0 | 5350 | /** |
e8e7f271 SRT |
5351 | * ufshcd_update_uic_error - check and set fatal UIC error flags. |
5352 | * @hba: per-adapter instance | |
7a3e97b0 | 5353 | */ |
e8e7f271 | 5354 | static void ufshcd_update_uic_error(struct ufs_hba *hba) |
7a3e97b0 SY |
5355 | { |
5356 | u32 reg; | |
5357 | ||
fb7b45f0 DR |
5358 | /* PHY layer lane error */ |
5359 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); | |
5360 | /* Ignore LINERESET indication, as this is not an error */ | |
5361 | if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && | |
ff8e20c6 | 5362 | (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { |
fb7b45f0 DR |
5363 | /* |
5364 | * To know whether this error is fatal or not, DB timeout | |
5365 | * must be checked but this error is handled separately. | |
5366 | */ | |
5367 | dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); | |
ff8e20c6 DR |
5368 | ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg); |
5369 | } | |
fb7b45f0 | 5370 | |
e8e7f271 SRT |
5371 | /* PA_INIT_ERROR is fatal and needs UIC reset */ |
5372 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); | |
ff8e20c6 DR |
5373 | if (reg) |
5374 | ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg); | |
5375 | ||
e8e7f271 SRT |
5376 | if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) |
5377 | hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; | |
583fa62d YG |
5378 | else if (hba->dev_quirks & |
5379 | UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { | |
5380 | if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) | |
5381 | hba->uic_error |= | |
5382 | UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; | |
5383 | else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) | |
5384 | hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; | |
5385 | } | |
e8e7f271 SRT |
5386 | |
5387 | /* UIC NL/TL/DME errors needs software retry */ | |
5388 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); | |
ff8e20c6 DR |
5389 | if (reg) { |
5390 | ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg); | |
e8e7f271 | 5391 | hba->uic_error |= UFSHCD_UIC_NL_ERROR; |
ff8e20c6 | 5392 | } |
e8e7f271 SRT |
5393 | |
5394 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); | |
ff8e20c6 DR |
5395 | if (reg) { |
5396 | ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg); | |
e8e7f271 | 5397 | hba->uic_error |= UFSHCD_UIC_TL_ERROR; |
ff8e20c6 | 5398 | } |
e8e7f271 SRT |
5399 | |
5400 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); | |
ff8e20c6 DR |
5401 | if (reg) { |
5402 | ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg); | |
e8e7f271 | 5403 | hba->uic_error |= UFSHCD_UIC_DME_ERROR; |
ff8e20c6 | 5404 | } |
e8e7f271 SRT |
5405 | |
5406 | dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", | |
5407 | __func__, hba->uic_error); | |
5408 | } | |
5409 | ||
5410 | /** | |
5411 | * ufshcd_check_errors - Check for errors that need s/w attention | |
5412 | * @hba: per-adapter instance | |
5413 | */ | |
5414 | static void ufshcd_check_errors(struct ufs_hba *hba) | |
5415 | { | |
5416 | bool queue_eh_work = false; | |
5417 | ||
7a3e97b0 | 5418 | if (hba->errors & INT_FATAL_ERRORS) |
e8e7f271 | 5419 | queue_eh_work = true; |
7a3e97b0 SY |
5420 | |
5421 | if (hba->errors & UIC_ERROR) { | |
e8e7f271 SRT |
5422 | hba->uic_error = 0; |
5423 | ufshcd_update_uic_error(hba); | |
5424 | if (hba->uic_error) | |
5425 | queue_eh_work = true; | |
7a3e97b0 | 5426 | } |
e8e7f271 SRT |
5427 | |
5428 | if (queue_eh_work) { | |
9a47ec7c YG |
5429 | /* |
5430 | * update the transfer error masks to sticky bits, let's do this | |
5431 | * irrespective of current ufshcd_state. | |
5432 | */ | |
5433 | hba->saved_err |= hba->errors; | |
5434 | hba->saved_uic_err |= hba->uic_error; | |
5435 | ||
e8e7f271 SRT |
5436 | /* handle fatal errors only when link is functional */ |
5437 | if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { | |
5438 | /* block commands from scsi mid-layer */ | |
38135535 | 5439 | ufshcd_scsi_block_requests(hba); |
e8e7f271 | 5440 | |
141f8165 | 5441 | hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED; |
66cc820f DR |
5442 | |
5443 | /* dump controller state before resetting */ | |
5444 | if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) { | |
5445 | bool pr_prdt = !!(hba->saved_err & | |
5446 | SYSTEM_BUS_FATAL_ERROR); | |
5447 | ||
5448 | dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", | |
5449 | __func__, hba->saved_err, | |
5450 | hba->saved_uic_err); | |
5451 | ||
5452 | ufshcd_print_host_regs(hba); | |
5453 | ufshcd_print_pwr_info(hba); | |
5454 | ufshcd_print_tmrs(hba, hba->outstanding_tasks); | |
5455 | ufshcd_print_trs(hba, hba->outstanding_reqs, | |
5456 | pr_prdt); | |
5457 | } | |
e8e7f271 SRT |
5458 | schedule_work(&hba->eh_work); |
5459 | } | |
3441da7d | 5460 | } |
e8e7f271 SRT |
5461 | /* |
5462 | * if (!queue_eh_work) - | |
5463 | * Other errors are either non-fatal where host recovers | |
5464 | * itself without s/w intervention or errors that will be | |
5465 | * handled by the SCSI core layer. | |
5466 | */ | |
7a3e97b0 SY |
5467 | } |
5468 | ||
5469 | /** | |
5470 | * ufshcd_tmc_handler - handle task management function completion | |
5471 | * @hba: per adapter instance | |
5472 | */ | |
5473 | static void ufshcd_tmc_handler(struct ufs_hba *hba) | |
5474 | { | |
5475 | u32 tm_doorbell; | |
5476 | ||
b873a275 | 5477 | tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); |
7a3e97b0 | 5478 | hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; |
e2933132 | 5479 | wake_up(&hba->tm_wq); |
7a3e97b0 SY |
5480 | } |
5481 | ||
5482 | /** | |
5483 | * ufshcd_sl_intr - Interrupt service routine | |
5484 | * @hba: per adapter instance | |
5485 | * @intr_status: contains interrupts generated by the controller | |
5486 | */ | |
5487 | static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) | |
5488 | { | |
5489 | hba->errors = UFSHCD_ERROR_MASK & intr_status; | |
5490 | if (hba->errors) | |
e8e7f271 | 5491 | ufshcd_check_errors(hba); |
7a3e97b0 | 5492 | |
53b3d9c3 SJ |
5493 | if (intr_status & UFSHCD_UIC_MASK) |
5494 | ufshcd_uic_cmd_compl(hba, intr_status); | |
7a3e97b0 SY |
5495 | |
5496 | if (intr_status & UTP_TASK_REQ_COMPL) | |
5497 | ufshcd_tmc_handler(hba); | |
5498 | ||
5499 | if (intr_status & UTP_TRANSFER_REQ_COMPL) | |
5500 | ufshcd_transfer_req_compl(hba); | |
5501 | } | |
5502 | ||
5503 | /** | |
5504 | * ufshcd_intr - Main interrupt service routine | |
5505 | * @irq: irq number | |
5506 | * @__hba: pointer to adapter instance | |
5507 | * | |
5508 | * Returns IRQ_HANDLED - If interrupt is valid | |
5509 | * IRQ_NONE - If invalid interrupt | |
5510 | */ | |
5511 | static irqreturn_t ufshcd_intr(int irq, void *__hba) | |
5512 | { | |
d75f7fe4 | 5513 | u32 intr_status, enabled_intr_status; |
7a3e97b0 SY |
5514 | irqreturn_t retval = IRQ_NONE; |
5515 | struct ufs_hba *hba = __hba; | |
7f6ba4f1 | 5516 | int retries = hba->nutrs; |
7a3e97b0 SY |
5517 | |
5518 | spin_lock(hba->host->host_lock); | |
b873a275 | 5519 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); |
7a3e97b0 | 5520 | |
7f6ba4f1 VG |
5521 | /* |
5522 | * There could be a max of hba->nutrs reqs in flight and in the worst case |
5523 | * if the reqs get finished 1 by 1 after the interrupt status is | |
5524 | * read, make sure we handle them by checking the interrupt status | |
5525 | * again in a loop until we process all of the reqs before returning. | |
5526 | */ | |
5527 | do { | |
5528 | enabled_intr_status = | |
5529 | intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); | |
5530 | if (intr_status) | |
5531 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); | |
5532 | if (enabled_intr_status) { | |
5533 | ufshcd_sl_intr(hba, enabled_intr_status); | |
5534 | retval = IRQ_HANDLED; | |
5535 | } | |
5536 | ||
5537 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); | |
5538 | } while (intr_status && --retries); | |
d75f7fe4 | 5539 | |
7a3e97b0 SY |
5540 | spin_unlock(hba->host->host_lock); |
5541 | return retval; | |
5542 | } | |
5543 | ||
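/*
 * Editor's note: ufshcd_intr() re-reads the interrupt status in a loop so
 * that completions racing with the first read are not missed. A hedged
 * user-space sketch of that loop shape; the four helpers stand in for the
 * MMIO accesses and per-source dispatch and are not driver functions.
 */
#include <stdbool.h>
#include <stdint.h>

extern uint32_t read_status(void);	/* hypothetical: REG_INTERRUPT_STATUS */
extern uint32_t read_enable(void);	/* hypothetical: REG_INTERRUPT_ENABLE */
extern void ack_status(uint32_t bits);	/* hypothetical: write-1-to-clear */
extern void service(uint32_t bits);	/* hypothetical: dispatch per source */

static bool isr_sketch(int max_loops)
{
	uint32_t status = read_status();
	bool handled = false;

	do {
		uint32_t enabled = status & read_enable();

		if (status)
			ack_status(status);	/* clear what we have seen */
		if (enabled) {
			service(enabled);	/* handle only enabled sources */
			handled = true;
		}
		status = read_status();		/* anything new in the meantime? */
	} while (status && --max_loops);

	return handled;
}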
e2933132 SRT |
5544 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) |
5545 | { | |
5546 | int err = 0; | |
5547 | u32 mask = 1 << tag; | |
5548 | unsigned long flags; | |
5549 | ||
5550 | if (!test_bit(tag, &hba->outstanding_tasks)) | |
5551 | goto out; | |
5552 | ||
5553 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1399c5b0 | 5554 | ufshcd_utmrl_clear(hba, tag); |
e2933132 SRT |
5555 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
5556 | ||
5557 | /* poll for max. 1 sec to clear door bell register by h/w */ | |
5558 | err = ufshcd_wait_for_register(hba, | |
5559 | REG_UTP_TASK_REQ_DOOR_BELL, | |
596585a2 | 5560 | mask, 0, 1000, 1000, true); |
e2933132 SRT |
5561 | out: |
5562 | return err; | |
5563 | } | |
5564 | ||
c6049cd9 CH |
5565 | static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, |
5566 | struct utp_task_req_desc *treq, u8 tm_function) | |
7a3e97b0 | 5567 | { |
c6049cd9 | 5568 | struct Scsi_Host *host = hba->host; |
7a3e97b0 | 5569 | unsigned long flags; |
c6049cd9 | 5570 | int free_slot, task_tag, err; |
7a3e97b0 | 5571 | |
e2933132 SRT |
5572 | /* |
5573 | * Get free slot, sleep if slots are unavailable. | |
5574 | * Even though we use wait_event() which sleeps indefinitely, | |
5575 | * the maximum wait time is bounded by %TM_CMD_TIMEOUT. | |
5576 | */ | |
5577 | wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); | |
1ab27c9c | 5578 | ufshcd_hold(hba, false); |
7a3e97b0 | 5579 | |
e2933132 | 5580 | spin_lock_irqsave(host->host_lock, flags); |
e2933132 | 5581 | task_tag = hba->nutrs + free_slot; |
7a3e97b0 | 5582 | |
c6049cd9 CH |
5583 | treq->req_header.dword_0 |= cpu_to_be32(task_tag); |
5584 | ||
5585 | memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq)); | |
d2877be4 KK |
5586 | ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); |
5587 | ||
7a3e97b0 SY |
5588 | /* send command to the controller */ |
5589 | __set_bit(free_slot, &hba->outstanding_tasks); | |
897efe62 YG |
5590 | |
5591 | /* Make sure descriptors are ready before ringing the task doorbell */ | |
5592 | wmb(); | |
5593 | ||
b873a275 | 5594 | ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); |
ad1a1b9c GB |
5595 | /* Make sure that doorbell is committed immediately */ |
5596 | wmb(); | |
7a3e97b0 SY |
5597 | |
5598 | spin_unlock_irqrestore(host->host_lock, flags); | |
5599 | ||
6667e6d9 OS |
5600 | ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); |
5601 | ||
7a3e97b0 | 5602 | /* wait until the task management command is completed */ |
e2933132 SRT |
5603 | err = wait_event_timeout(hba->tm_wq, |
5604 | test_bit(free_slot, &hba->tm_condition), | |
5605 | msecs_to_jiffies(TM_CMD_TIMEOUT)); | |
7a3e97b0 | 5606 | if (!err) { |
6667e6d9 | 5607 | ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); |
e2933132 SRT |
5608 | dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", |
5609 | __func__, tm_function); | |
5610 | if (ufshcd_clear_tm_cmd(hba, free_slot)) | |
5611 | dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", |
5612 | __func__, free_slot); | |
5613 | err = -ETIMEDOUT; | |
5614 | } else { | |
c6049cd9 CH |
5615 | err = 0; |
5616 | memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); | |
5617 | ||
6667e6d9 | 5618 | ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); |
c6049cd9 CH |
5619 | |
5620 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5621 | __clear_bit(free_slot, &hba->outstanding_tasks); | |
5622 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5623 | ||
7a3e97b0 | 5624 | } |
e2933132 | 5625 | |
7a3e97b0 | 5626 | clear_bit(free_slot, &hba->tm_condition); |
e2933132 SRT |
5627 | ufshcd_put_tm_slot(hba, free_slot); |
5628 | wake_up(&hba->tm_tag_wq); | |
5629 | ||
1ab27c9c | 5630 | ufshcd_release(hba); |
7a3e97b0 SY |
5631 | return err; |
5632 | } | |
5633 | ||
c6049cd9 CH |
5634 | /** |
5635 | * ufshcd_issue_tm_cmd - issues task management commands to controller | |
5636 | * @hba: per adapter instance | |
5637 | * @lun_id: LUN ID to which TM command is sent | |
5638 | * @task_id: task ID to which the TM command is applicable | |
5639 | * @tm_function: task management function opcode | |
5640 | * @tm_response: task management service response return value | |
5641 | * | |
5642 | * Returns non-zero value on error, zero on success. | |
5643 | */ | |
5644 | static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, | |
5645 | u8 tm_function, u8 *tm_response) | |
5646 | { | |
5647 | struct utp_task_req_desc treq = { { 0 }, }; | |
5648 | int ocs_value, err; | |
5649 | ||
5650 | /* Configure task request descriptor */ | |
5651 | treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | |
5652 | treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
5653 | ||
5654 | /* Configure task request UPIU */ | |
5655 | treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) | | |
5656 | cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24); | |
5657 | treq.req_header.dword_1 = cpu_to_be32(tm_function << 16); | |
5658 | ||
5659 | /* | |
5660 | * The host shall provide the same value for LUN field in the basic | |
5661 | * header and for Input Parameter. | |
5662 | */ | |
5663 | treq.input_param1 = cpu_to_be32(lun_id); | |
5664 | treq.input_param2 = cpu_to_be32(task_id); | |
5665 | ||
5666 | err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); | |
5667 | if (err == -ETIMEDOUT) | |
5668 | return err; | |
5669 | ||
5670 | ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; | |
5671 | if (ocs_value != OCS_SUCCESS) | |
5672 | dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", | |
5673 | __func__, ocs_value); | |
5674 | else if (tm_response) | |
5675 | *tm_response = be32_to_cpu(treq.output_param1) & | |
5676 | MASK_TM_SERVICE_RESP; | |
5677 | return err; | |
5678 | } | |
5679 | ||
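/*
 * Editor's note: the task-request UPIU header built above places the
 * transaction code in the most significant byte of dword_0, the LUN in
 * bits 15:8 and (once allocated) the task tag in bits 7:0, all big-endian
 * on the wire; the TM function opcode lands in bits 23:16 of dword_1.
 * Illustrative packing helper below - htobe32() stands in for the kernel's
 * cpu_to_be32() and the transaction-code value mirrors
 * UPIU_TRANSACTION_TASK_REQ.
 */
#include <endian.h>
#include <stdint.h>

#define TASK_REQ_CODE 0x04	/* UPIU transaction code for task management */

static void build_tm_header(uint32_t *dword_0, uint32_t *dword_1,
			    uint8_t lun_id, uint8_t task_tag, uint8_t tm_function)
{
	*dword_0 = htobe32((uint32_t)TASK_REQ_CODE << 24 |
			   (uint32_t)lun_id << 8 | task_tag);
	*dword_1 = htobe32((uint32_t)tm_function << 16);
}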
5e0a86ee AA |
5680 | /** |
5681 | * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests | |
5682 | * @hba: per-adapter instance | |
5683 | * @req_upiu: upiu request | |
5684 | * @rsp_upiu: upiu reply | |
5685 | * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target | |
5686 | * @desc_buff: pointer to descriptor buffer, NULL if NA | |
5687 | * @buff_len: descriptor size, 0 if NA | |
5688 | * @desc_op: descriptor operation | |
5689 | * | |
5690 | * This type of request uses the UTP Transfer Request Descriptor - utrd. |
5691 | * Therefore, it "rides" the device management infrastructure: it uses its tag and |
5692 | * tasks work queues. | |
5693 | * | |
5694 | * Since there is only one available tag for device management commands, | |
5695 | * the caller is expected to hold the hba->dev_cmd.lock mutex. | |
5696 | */ | |
5697 | static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, | |
5698 | struct utp_upiu_req *req_upiu, | |
5699 | struct utp_upiu_req *rsp_upiu, | |
5700 | u8 *desc_buff, int *buff_len, | |
5701 | int cmd_type, | |
5702 | enum query_opcode desc_op) | |
5703 | { | |
5704 | struct ufshcd_lrb *lrbp; | |
5705 | int err = 0; | |
5706 | int tag; | |
5707 | struct completion wait; | |
5708 | unsigned long flags; | |
5709 | u32 upiu_flags; | |
5710 | ||
5711 | down_read(&hba->clk_scaling_lock); | |
5712 | ||
5713 | wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); | |
5714 | ||
5715 | init_completion(&wait); | |
5716 | lrbp = &hba->lrb[tag]; | |
5717 | WARN_ON(lrbp->cmd); | |
5718 | ||
5719 | lrbp->cmd = NULL; | |
5720 | lrbp->sense_bufflen = 0; | |
5721 | lrbp->sense_buffer = NULL; | |
5722 | lrbp->task_tag = tag; | |
5723 | lrbp->lun = 0; | |
5724 | lrbp->intr_cmd = true; | |
5725 | hba->dev_cmd.type = cmd_type; | |
5726 | ||
5727 | switch (hba->ufs_version) { | |
5728 | case UFSHCI_VERSION_10: | |
5729 | case UFSHCI_VERSION_11: | |
5730 | lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; | |
5731 | break; | |
5732 | default: | |
5733 | lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; | |
5734 | break; | |
5735 | } | |
5736 | ||
5737 | /* update the task tag in the request upiu */ | |
5738 | req_upiu->header.dword_0 |= cpu_to_be32(tag); | |
5739 | ||
5740 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); | |
5741 | ||
5742 | /* just copy the upiu request as it is */ | |
5743 | memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); | |
5744 | if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) { | |
5745 | /* The Data Segment Area is optional depending upon the query | |
5746 | * function value. For WRITE DESCRIPTOR, the data segment |
5747 | * follows right after the tsf. | |
5748 | */ | |
5749 | memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len); | |
5750 | *buff_len = 0; | |
5751 | } | |
5752 | ||
5753 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); | |
5754 | ||
5755 | hba->dev_cmd.complete = &wait; | |
5756 | ||
5757 | /* Make sure descriptors are ready before ringing the doorbell */ | |
5758 | wmb(); | |
5759 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5760 | ufshcd_send_command(hba, tag); | |
5761 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5762 | ||
5763 | /* | |
5764 | * ignore the returning value here - ufshcd_check_query_response is | |
5765 | * bound to fail since dev_cmd.query and dev_cmd.type were left empty. | |
5766 | * read the response directly ignoring all errors. | |
5767 | */ | |
5768 | ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); | |
5769 | ||
5770 | /* just copy the upiu response as it is */ | |
5771 | memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); | |
5772 | ||
5773 | ufshcd_put_dev_cmd_tag(hba, tag); | |
5774 | wake_up(&hba->dev_cmd.tag_wq); | |
5775 | up_read(&hba->clk_scaling_lock); | |
5776 | return err; | |
5777 | } | |
5778 | ||
5779 | /** | |
5780 | * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands | |
5781 | * @hba: per-adapter instance | |
5782 | * @req_upiu: upiu request | |
5783 | * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands | |
5784 | * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target | |
5785 | * @desc_buff: pointer to descriptor buffer, NULL if NA | |
5786 | * @buff_len: descriptor size, 0 if NA | |
5787 | * @desc_op: descriptor operation | |
5788 | * | |
5789 | * Supports UTP Transfer requests (nop and query), and UTP Task | |
5790 | * Management requests. | |
5791 | * It is up to the caller to fill the upiu content properly, as it will | |
5792 | * be copied without any further input validation. | |
5793 | */ | |
5794 | int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, | |
5795 | struct utp_upiu_req *req_upiu, | |
5796 | struct utp_upiu_req *rsp_upiu, | |
5797 | int msgcode, | |
5798 | u8 *desc_buff, int *buff_len, | |
5799 | enum query_opcode desc_op) | |
5800 | { | |
5801 | int err; | |
5802 | int cmd_type = DEV_CMD_TYPE_QUERY; | |
5803 | struct utp_task_req_desc treq = { { 0 }, }; | |
5804 | int ocs_value; | |
5805 | u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; | |
5806 | ||
5807 | if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) { | |
5808 | err = -ENOTSUPP; | |
5809 | goto out; | |
5810 | } | |
5811 | ||
5812 | switch (msgcode) { | |
5813 | case UPIU_TRANSACTION_NOP_OUT: | |
5814 | cmd_type = DEV_CMD_TYPE_NOP; | |
5815 | /* fall through */ | |
5816 | case UPIU_TRANSACTION_QUERY_REQ: | |
5817 | ufshcd_hold(hba, false); | |
5818 | mutex_lock(&hba->dev_cmd.lock); | |
5819 | err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, | |
5820 | desc_buff, buff_len, | |
5821 | cmd_type, desc_op); | |
5822 | mutex_unlock(&hba->dev_cmd.lock); | |
5823 | ufshcd_release(hba); | |
5824 | ||
5825 | break; | |
5826 | case UPIU_TRANSACTION_TASK_REQ: | |
5827 | treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | |
5828 | treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
5829 | ||
5830 | memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu)); | |
5831 | ||
5832 | err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); | |
5833 | if (err == -ETIMEDOUT) | |
5834 | break; | |
5835 | ||
5836 | ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS; | |
5837 | if (ocs_value != OCS_SUCCESS) { | |
5838 | dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, | |
5839 | ocs_value); | |
5840 | break; | |
5841 | } | |
5842 | ||
5843 | memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu)); | |
5844 | ||
5845 | break; | |
5846 | default: | |
5847 | err = -EINVAL; | |
5848 | ||
5849 | break; | |
5850 | } | |
5851 | ||
5852 | out: | |
5853 | return err; | |
5854 | } | |
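
/*
 * Illustrative sketch only (not part of the driver): how an in-kernel caller
 * such as the UFS BSG layer might hand a raw NOP OUT UPIU to
 * ufshcd_exec_raw_upiu_cmd(). The helper name below and the use of
 * UPIU_HEADER_DWORD() to build the header are assumptions for illustration;
 * real callers (see ufs_bsg.c) forward a UPIU supplied from user space.
 */
static int __maybe_unused ufshcd_example_send_raw_nop(struct ufs_hba *hba)
{
	struct utp_upiu_req req_upiu = { };
	struct utp_upiu_req rsp_upiu = { };

	/*
	 * Transaction type NOP OUT; flags, LUN and task tag stay zero since
	 * ufshcd_issue_devman_upiu_cmd() ORs the real tag into dword_0.
	 */
	req_upiu.header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0);

	/* No descriptor payload, so desc_buff and buff_len are not used. */
	return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, NULL, UPIU_QUERY_OPCODE_NOP);
}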
5855 | ||
7a3e97b0 | 5856 | /** |
3441da7d SRT |
5857 | * ufshcd_eh_device_reset_handler - device reset handler registered to |
5858 | * scsi layer. | |
7a3e97b0 SY |
5859 | * @cmd: SCSI command pointer |
5860 | * | |
5861 | * Returns SUCCESS/FAILED | |
5862 | */ | |
3441da7d | 5863 | static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) |
7a3e97b0 SY |
5864 | { |
5865 | struct Scsi_Host *host; | |
5866 | struct ufs_hba *hba; | |
5867 | unsigned int tag; | |
5868 | u32 pos; | |
5869 | int err; | |
e2933132 SRT |
5870 | u8 resp = 0xF; |
5871 | struct ufshcd_lrb *lrbp; | |
3441da7d | 5872 | unsigned long flags; |
7a3e97b0 SY |
5873 | |
5874 | host = cmd->device->host; | |
5875 | hba = shost_priv(host); | |
5876 | tag = cmd->request->tag; | |
5877 | ||
e2933132 SRT |
5878 | lrbp = &hba->lrb[tag]; |
5879 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); | |
5880 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
3441da7d SRT |
5881 | if (!err) |
5882 | err = resp; | |
7a3e97b0 | 5883 | goto out; |
e2933132 | 5884 | } |
7a3e97b0 | 5885 | |
3441da7d SRT |
5886 | /* clear the commands that were pending for corresponding LUN */ |
5887 | for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { | |
5888 | if (hba->lrb[pos].lun == lrbp->lun) { | |
5889 | err = ufshcd_clear_cmd(hba, pos); | |
5890 | if (err) | |
5891 | break; | |
7a3e97b0 | 5892 | } |
3441da7d SRT |
5893 | } |
5894 | spin_lock_irqsave(host->host_lock, flags); | |
5895 | ufshcd_transfer_req_compl(hba); | |
5896 | spin_unlock_irqrestore(host->host_lock, flags); | |
7fabb77b | 5897 | |
7a3e97b0 | 5898 | out: |
7fabb77b | 5899 | hba->req_abort_count = 0; |
3441da7d SRT |
5900 | if (!err) { |
5901 | err = SUCCESS; | |
5902 | } else { | |
5903 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | |
5904 | err = FAILED; | |
5905 | } | |
7a3e97b0 SY |
5906 | return err; |
5907 | } | |
5908 | ||
e0b299e3 GB |
5909 | static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) |
5910 | { | |
5911 | struct ufshcd_lrb *lrbp; | |
5912 | int tag; | |
5913 | ||
5914 | for_each_set_bit(tag, &bitmap, hba->nutrs) { | |
5915 | lrbp = &hba->lrb[tag]; | |
5916 | lrbp->req_abort_skip = true; | |
5917 | } | |
5918 | } | |
5919 | ||
7a3e97b0 SY |
5920 | /** |
5921 | * ufshcd_abort - abort a specific command | |
5922 | * @cmd: SCSI command pointer | |
5923 | * | |
f20810d8 SRT |
5924 | * Abort the pending command in the device by sending the UFS_ABORT_TASK task | |
5925 | * management command, and in the host controller by clearing the doorbell | |
5926 | * register. There can be a race where the controller sends the command to the | |
5927 | * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK | |
5928 | * to check whether the command was really issued, and only then try to abort it. | |
5929 | * | |
7a3e97b0 SY |
5930 | * Returns SUCCESS/FAILED |
5931 | */ | |
5932 | static int ufshcd_abort(struct scsi_cmnd *cmd) | |
5933 | { | |
5934 | struct Scsi_Host *host; | |
5935 | struct ufs_hba *hba; | |
5936 | unsigned long flags; | |
5937 | unsigned int tag; | |
f20810d8 SRT |
5938 | int err = 0; |
5939 | int poll_cnt; | |
e2933132 SRT |
5940 | u8 resp = 0xF; |
5941 | struct ufshcd_lrb *lrbp; | |
e9d501b1 | 5942 | u32 reg; |
7a3e97b0 SY |
5943 | |
5944 | host = cmd->device->host; | |
5945 | hba = shost_priv(host); | |
5946 | tag = cmd->request->tag; | |
e7d38257 | 5947 | lrbp = &hba->lrb[tag]; |
14497328 YG |
5948 | if (!ufshcd_valid_tag(hba, tag)) { |
5949 | dev_err(hba->dev, | |
5950 | "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", | |
5951 | __func__, tag, cmd, cmd->request); | |
5952 | BUG(); | |
5953 | } | |
7a3e97b0 | 5954 | |
e7d38257 DR |
5955 | /* |
5956 | * Task abort to the device W-LUN is illegal. When this command | |
5957 | * fails due to that spec violation, the next step in SCSI error | |
5958 | * handling would be an LU reset which, again, is a spec violation. | |
5959 | * To avoid these unnecessary/illegal steps we skip straight to the | |
5960 | * last error handling stage: reset and restore. | |
5961 | */ | |
5962 | if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) | |
5963 | return ufshcd_eh_host_reset_handler(cmd); | |
5964 | ||
1ab27c9c | 5965 | ufshcd_hold(hba, false); |
14497328 | 5966 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
f20810d8 | 5967 | /* If command is already aborted/completed, return SUCCESS */ |
14497328 YG |
5968 | if (!(test_bit(tag, &hba->outstanding_reqs))) { |
5969 | dev_err(hba->dev, | |
5970 | "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", | |
5971 | __func__, tag, hba->outstanding_reqs, reg); | |
f20810d8 | 5972 | goto out; |
14497328 | 5973 | } |
7a3e97b0 | 5974 | |
e9d501b1 DR |
5975 | if (!(reg & (1 << tag))) { |
5976 | dev_err(hba->dev, | |
5977 | "%s: cmd was completed, but without a notifying intr, tag = %d", | |
5978 | __func__, tag); | |
5979 | } | |
5980 | ||
66cc820f DR |
5981 | /* Print Transfer Request of aborted task */ |
5982 | dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); | |
66cc820f | 5983 | |
7fabb77b GB |
5984 | /* |
5985 | * Print detailed info about aborted request. | |
5986 | * As more than one request might get aborted at the same time, | |
5987 | * print full information only for the first aborted request in order | |
5988 | * to reduce repeated printouts. For other aborted requests only print | |
5989 | * basic details. | |
5990 | */ | |
5991 | scsi_print_command(hba->lrb[tag].cmd); | |
5992 | if (!hba->req_abort_count) { | |
5993 | ufshcd_print_host_regs(hba); | |
6ba65588 | 5994 | ufshcd_print_host_state(hba); |
7fabb77b GB |
5995 | ufshcd_print_pwr_info(hba); |
5996 | ufshcd_print_trs(hba, 1 << tag, true); | |
5997 | } else { | |
5998 | ufshcd_print_trs(hba, 1 << tag, false); | |
5999 | } | |
6000 | hba->req_abort_count++; | |
e0b299e3 GB |
6001 | |
6002 | /* Skip task abort in case previous aborts failed and report failure */ | |
6003 | if (lrbp->req_abort_skip) { | |
6004 | err = -EIO; | |
6005 | goto out; | |
6006 | } | |
6007 | ||
f20810d8 SRT |
6008 | for (poll_cnt = 100; poll_cnt; poll_cnt--) { |
6009 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, | |
6010 | UFS_QUERY_TASK, &resp); | |
6011 | if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { | |
6012 | /* cmd pending in the device */ | |
ff8e20c6 DR |
6013 | dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", |
6014 | __func__, tag); | |
f20810d8 SRT |
6015 | break; |
6016 | } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
f20810d8 SRT |
6017 | /* |
6018 | * cmd not pending in the device, check if it is | |
6019 | * in transition. | |
6020 | */ | |
ff8e20c6 DR |
6021 | dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", |
6022 | __func__, tag); | |
f20810d8 SRT |
6023 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
6024 | if (reg & (1 << tag)) { | |
6025 | /* sleep for max. 200us to stabilize */ | |
6026 | usleep_range(100, 200); | |
6027 | continue; | |
6028 | } | |
6029 | /* command completed already */ | |
ff8e20c6 DR |
6030 | dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", |
6031 | __func__, tag); | |
f20810d8 SRT |
6032 | goto out; |
6033 | } else { | |
ff8e20c6 DR |
6034 | dev_err(hba->dev, |
6035 | "%s: no response from device. tag = %d, err %d\n", | |
6036 | __func__, tag, err); | |
f20810d8 SRT |
6037 | if (!err) |
6038 | err = resp; /* service response error */ | |
6039 | goto out; | |
6040 | } | |
6041 | } | |
6042 | ||
6043 | if (!poll_cnt) { | |
6044 | err = -EBUSY; | |
7a3e97b0 SY |
6045 | goto out; |
6046 | } | |
7a3e97b0 | 6047 | |
e2933132 SRT |
6048 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, |
6049 | UFS_ABORT_TASK, &resp); | |
6050 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
ff8e20c6 | 6051 | if (!err) { |
f20810d8 | 6052 | err = resp; /* service response error */ |
ff8e20c6 DR |
6053 | dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", |
6054 | __func__, tag, err); | |
6055 | } | |
7a3e97b0 | 6056 | goto out; |
e2933132 | 6057 | } |
7a3e97b0 | 6058 | |
f20810d8 | 6059 | err = ufshcd_clear_cmd(hba, tag); |
ff8e20c6 DR |
6060 | if (err) { |
6061 | dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", | |
6062 | __func__, tag, err); | |
f20810d8 | 6063 | goto out; |
ff8e20c6 | 6064 | } |
f20810d8 | 6065 | |
7a3e97b0 SY |
6066 | scsi_dma_unmap(cmd); |
6067 | ||
6068 | spin_lock_irqsave(host->host_lock, flags); | |
a48353f6 | 6069 | ufshcd_outstanding_req_clear(hba, tag); |
7a3e97b0 SY |
6070 | hba->lrb[tag].cmd = NULL; |
6071 | spin_unlock_irqrestore(host->host_lock, flags); | |
5a0b0cb9 SRT |
6072 | |
6073 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
6074 | wake_up(&hba->dev_cmd.tag_wq); | |
1ab27c9c | 6075 | |
7a3e97b0 | 6076 | out: |
f20810d8 SRT |
6077 | if (!err) { |
6078 | err = SUCCESS; | |
6079 | } else { | |
6080 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | |
e0b299e3 | 6081 | ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); |
f20810d8 SRT |
6082 | err = FAILED; |
6083 | } | |
6084 | ||
1ab27c9c ST |
6085 | /* |
6086 | * This ufshcd_release() corresponds to the original scsi cmd that got | |
6087 | * aborted here (as we won't get any IRQ for it). | |
6088 | */ | |
6089 | ufshcd_release(hba); | |
7a3e97b0 SY |
6090 | return err; |
6091 | } | |
6092 | ||
3441da7d SRT |
6093 | /** |
6094 | * ufshcd_host_reset_and_restore - reset and restore host controller | |
6095 | * @hba: per-adapter instance | |
6096 | * | |
6097 | * Note that host controller reset may issue DME_RESET to the | |
6098 | * local and remote (device) UniPro stacks, and the attributes | |
6099 | * are reset to their default state. | |
6100 | * | |
6101 | * Returns zero on success, non-zero on failure | |
6102 | */ | |
6103 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) | |
6104 | { | |
6105 | int err; | |
3441da7d SRT |
6106 | unsigned long flags; |
6107 | ||
6108 | /* Reset the host controller */ | |
6109 | spin_lock_irqsave(hba->host->host_lock, flags); | |
596585a2 | 6110 | ufshcd_hba_stop(hba, false); |
3441da7d SRT |
6111 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
6112 | ||
a3cd5ec5 SJ |
6113 | /* scale up clocks to max frequency before full reinitialization */ |
6114 | ufshcd_scale_clks(hba, true); | |
6115 | ||
3441da7d SRT |
6116 | err = ufshcd_hba_enable(hba); |
6117 | if (err) | |
6118 | goto out; | |
6119 | ||
6120 | /* Establish the link again and restore the device */ | |
1d337ec2 SRT |
6121 | err = ufshcd_probe_hba(hba); |
6122 | ||
6123 | if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) | |
3441da7d SRT |
6124 | err = -EIO; |
6125 | out: | |
6126 | if (err) | |
6127 | dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); | |
6128 | ||
6129 | return err; | |
6130 | } | |
6131 | ||
6132 | /** | |
6133 | * ufshcd_reset_and_restore - reset and re-initialize host/device | |
6134 | * @hba: per-adapter instance | |
6135 | * | |
6136 | * Reset and recover device, host and re-establish link. This | |
6137 | * is helpful to recover the communication in fatal error conditions. | |
6138 | * | |
6139 | * Returns zero on success, non-zero on failure | |
6140 | */ | |
6141 | static int ufshcd_reset_and_restore(struct ufs_hba *hba) | |
6142 | { | |
6143 | int err = 0; | |
6144 | unsigned long flags; | |
1d337ec2 | 6145 | int retries = MAX_HOST_RESET_RETRIES; |
3441da7d | 6146 | |
1d337ec2 SRT |
6147 | do { |
6148 | err = ufshcd_host_reset_and_restore(hba); | |
6149 | } while (err && --retries); | |
3441da7d SRT |
6150 | |
6151 | /* | |
6152 | * After reset, the door-bell might be cleared; complete the | |
6153 | * outstanding requests in s/w here. | |
6154 | */ | |
6155 | spin_lock_irqsave(hba->host->host_lock, flags); | |
6156 | ufshcd_transfer_req_compl(hba); | |
6157 | ufshcd_tmc_handler(hba); | |
6158 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
6159 | ||
6160 | return err; | |
6161 | } | |
6162 | ||
6163 | /** | |
6164 | * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer | |
8aa29f19 | 6165 | * @cmd: SCSI command pointer |
3441da7d SRT |
6166 | * |
6167 | * Returns SUCCESS/FAILED | |
6168 | */ | |
6169 | static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | |
6170 | { | |
6171 | int err; | |
6172 | unsigned long flags; | |
6173 | struct ufs_hba *hba; | |
6174 | ||
6175 | hba = shost_priv(cmd->device->host); | |
6176 | ||
1ab27c9c | 6177 | ufshcd_hold(hba, false); |
3441da7d SRT |
6178 | /* |
6179 | * Check if there is any race with fatal error handling. | |
6180 | * If so, wait for it to complete. Even though fatal error | |
6181 | * handling does reset and restore in some cases, don't assume | |
6182 | * anything out of it. We are just avoiding race here. | |
6183 | */ | |
6184 | do { | |
6185 | spin_lock_irqsave(hba->host->host_lock, flags); | |
e8e7f271 | 6186 | if (!(work_pending(&hba->eh_work) || |
8dc0da79 ZL |
6187 | hba->ufshcd_state == UFSHCD_STATE_RESET || |
6188 | hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED)) | |
3441da7d SRT |
6189 | break; |
6190 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
6191 | dev_dbg(hba->dev, "%s: reset in progress\n", __func__); | |
e8e7f271 | 6192 | flush_work(&hba->eh_work); |
3441da7d SRT |
6193 | } while (1); |
6194 | ||
6195 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
6196 | ufshcd_set_eh_in_progress(hba); | |
6197 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
6198 | ||
6199 | err = ufshcd_reset_and_restore(hba); | |
6200 | ||
6201 | spin_lock_irqsave(hba->host->host_lock, flags); | |
6202 | if (!err) { | |
6203 | err = SUCCESS; | |
6204 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | |
6205 | } else { | |
6206 | err = FAILED; | |
6207 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
6208 | } | |
6209 | ufshcd_clear_eh_in_progress(hba); | |
6210 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
6211 | ||
1ab27c9c | 6212 | ufshcd_release(hba); |
3441da7d SRT |
6213 | return err; |
6214 | } | |
6215 | ||
3a4bf06d YG |
6216 | /** |
6217 | * ufshcd_get_max_icc_level - calculate the ICC level | |
6218 | * @sup_curr_uA: max. current supported by the regulator | |
6219 | * @start_scan: row at the desc table to start scan from | |
6220 | * @buff: power descriptor buffer | |
6221 | * | |
6222 | * Returns calculated max ICC level for specific regulator | |
6223 | */ | |
6224 | static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) | |
6225 | { | |
6226 | int i; | |
6227 | int curr_uA; | |
6228 | u16 data; | |
6229 | u16 unit; | |
6230 | ||
6231 | for (i = start_scan; i >= 0; i--) { | |
d79713f9 | 6232 | data = be16_to_cpup((__be16 *)&buff[2 * i]); |
3a4bf06d YG |
6233 | unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> |
6234 | ATTR_ICC_LVL_UNIT_OFFSET; | |
6235 | curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; | |
6236 | switch (unit) { | |
6237 | case UFSHCD_NANO_AMP: | |
6238 | curr_uA = curr_uA / 1000; | |
6239 | break; | |
6240 | case UFSHCD_MILI_AMP: | |
6241 | curr_uA = curr_uA * 1000; | |
6242 | break; | |
6243 | case UFSHCD_AMP: | |
6244 | curr_uA = curr_uA * 1000 * 1000; | |
6245 | break; | |
6246 | case UFSHCD_MICRO_AMP: | |
6247 | default: | |
6248 | break; | |
6249 | } | |
6250 | if (sup_curr_uA >= curr_uA) | |
6251 | break; | |
6252 | } | |
6253 | if (i < 0) { | |
6254 | i = 0; | |
6255 | pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); | |
6256 | } | |
6257 | ||
6258 | return (u32)i; | |
6259 | } | |
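
/*
 * Worked example (values assumed for illustration): if a power descriptor
 * entry decodes to unit UFSHCD_MILI_AMP with a value field of 150, it stands
 * for 150 mA, which the loop above normalizes to 150000 uA before comparing
 * it with the regulator limit. Scanning downwards from start_scan, the first
 * row whose normalized current fits under sup_curr_uA is returned as the
 * supported ICC level.
 */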
6260 | ||
6261 | /** | |
6262 | * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level | |
6263 | * In case the regulators are not initialized we return 0 | |
6264 | * @hba: per-adapter instance | |
6265 | * @desc_buf: power descriptor buffer to extract ICC levels from. | |
6266 | * @len: length of desc_buf | |
6267 | * | |
6268 | * Returns calculated ICC level | |
6269 | */ | |
6270 | static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, | |
6271 | u8 *desc_buf, int len) | |
6272 | { | |
6273 | u32 icc_level = 0; | |
6274 | ||
6275 | if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || | |
6276 | !hba->vreg_info.vccq2) { | |
6277 | dev_err(hba->dev, | |
6278 | "%s: Regulator capability was not set, actvIccLevel=%d", | |
6279 | __func__, icc_level); | |
6280 | goto out; | |
6281 | } | |
6282 | ||
6283 | if (hba->vreg_info.vcc) | |
6284 | icc_level = ufshcd_get_max_icc_level( | |
6285 | hba->vreg_info.vcc->max_uA, | |
6286 | POWER_DESC_MAX_ACTV_ICC_LVLS - 1, | |
6287 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); | |
6288 | ||
6289 | if (hba->vreg_info.vccq) | |
6290 | icc_level = ufshcd_get_max_icc_level( | |
6291 | hba->vreg_info.vccq->max_uA, | |
6292 | icc_level, | |
6293 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); | |
6294 | ||
6295 | if (hba->vreg_info.vccq2) | |
6296 | icc_level = ufshcd_get_max_icc_level( | |
6297 | hba->vreg_info.vccq2->max_uA, | |
6298 | icc_level, | |
6299 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); | |
6300 | out: | |
6301 | return icc_level; | |
6302 | } | |
6303 | ||
6304 | static void ufshcd_init_icc_levels(struct ufs_hba *hba) | |
6305 | { | |
6306 | int ret; | |
a4b0e8a4 | 6307 | int buff_len = hba->desc_size.pwr_desc; |
bbe21d7a KC |
6308 | u8 *desc_buf; |
6309 | ||
6310 | desc_buf = kmalloc(buff_len, GFP_KERNEL); | |
6311 | if (!desc_buf) | |
6312 | return; | |
3a4bf06d YG |
6313 | |
6314 | ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); | |
6315 | if (ret) { | |
6316 | dev_err(hba->dev, | |
6317 | "%s: Failed reading power descriptor.len = %d ret = %d", | |
6318 | __func__, buff_len, ret); | |
bbe21d7a | 6319 | goto out; |
3a4bf06d YG |
6320 | } |
6321 | ||
6322 | hba->init_prefetch_data.icc_level = | |
6323 | ufshcd_find_max_sup_active_icc_level(hba, | |
6324 | desc_buf, buff_len); | |
6325 | dev_dbg(hba->dev, "%s: setting icc_level 0x%x", | |
6326 | __func__, hba->init_prefetch_data.icc_level); | |
6327 | ||
dbd34a61 SM |
6328 | ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
6329 | QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, | |
6330 | &hba->init_prefetch_data.icc_level); | |
3a4bf06d YG |
6331 | |
6332 | if (ret) | |
6333 | dev_err(hba->dev, | |
6334 | "%s: Failed configuring bActiveICCLevel = %d ret = %d", | |
6335 | __func__, hba->init_prefetch_data.icc_level , ret); | |
6336 | ||
bbe21d7a KC |
6337 | out: |
6338 | kfree(desc_buf); | |
3a4bf06d YG |
6339 | } |
6340 | ||
2a8fa600 SJ |
6341 | /** |
6342 | * ufshcd_scsi_add_wlus - Adds required W-LUs | |
6343 | * @hba: per-adapter instance | |
6344 | * | |
6345 | * UFS device specification requires the UFS devices to support 4 well known | |
6346 | * logical units: | |
6347 | * "REPORT_LUNS" (address: 01h) | |
6348 | * "UFS Device" (address: 50h) | |
6349 | * "RPMB" (address: 44h) | |
6350 | * "BOOT" (address: 30h) | |
6351 | * UFS device's power management needs to be controlled by "POWER CONDITION" | |
6352 | * field of the SSU (START STOP UNIT) command. But this "power condition" field | |
6353 | * will take effect only when it is sent to the "UFS device" well known logical | |
6354 | * unit, hence we require a scsi_device instance to represent this logical unit | |
6355 | * in order for the UFS host driver to send the SSU command for power management. | |
8aa29f19 | 6356 | * |
2a8fa600 SJ |
6357 | * We also require the scsi_device instance for "RPMB" (Replay Protected Memory |
6358 | * Block) LU so user space process can control this LU. User space may also | |
6359 | * want to have access to BOOT LU. | |
8aa29f19 | 6360 | * |
2a8fa600 SJ |
6361 | * This function adds scsi device instances for each of the well known LUs | |
6362 | * (except "REPORT LUNS" LU). | |
6363 | * | |
6364 | * Returns zero on success (all required W-LUs are added successfully), | |
6365 | * non-zero error value on failure (if failed to add any of the required W-LU). | |
6366 | */ | |
6367 | static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) | |
6368 | { | |
6369 | int ret = 0; | |
7c48bfd0 AM |
6370 | struct scsi_device *sdev_rpmb; |
6371 | struct scsi_device *sdev_boot; | |
2a8fa600 SJ |
6372 | |
6373 | hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, | |
6374 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); | |
6375 | if (IS_ERR(hba->sdev_ufs_device)) { | |
6376 | ret = PTR_ERR(hba->sdev_ufs_device); | |
6377 | hba->sdev_ufs_device = NULL; | |
6378 | goto out; | |
6379 | } | |
7c48bfd0 | 6380 | scsi_device_put(hba->sdev_ufs_device); |
2a8fa600 | 6381 | |
7c48bfd0 | 6382 | sdev_rpmb = __scsi_add_device(hba->host, 0, 0, |
2a8fa600 | 6383 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); |
7c48bfd0 AM |
6384 | if (IS_ERR(sdev_rpmb)) { |
6385 | ret = PTR_ERR(sdev_rpmb); | |
3d21fbde | 6386 | goto remove_sdev_ufs_device; |
2a8fa600 | 6387 | } |
7c48bfd0 | 6388 | scsi_device_put(sdev_rpmb); |
3d21fbde HK |
6389 | |
6390 | sdev_boot = __scsi_add_device(hba->host, 0, 0, | |
6391 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); | |
6392 | if (IS_ERR(sdev_boot)) | |
6393 | dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); | |
6394 | else | |
6395 | scsi_device_put(sdev_boot); | |
2a8fa600 SJ |
6396 | goto out; |
6397 | ||
2a8fa600 SJ |
6398 | remove_sdev_ufs_device: |
6399 | scsi_remove_device(hba->sdev_ufs_device); | |
6400 | out: | |
6401 | return ret; | |
6402 | } | |
6403 | ||
93fdd5ac TW |
6404 | static int ufs_get_device_desc(struct ufs_hba *hba, |
6405 | struct ufs_dev_desc *dev_desc) | |
c58ab7aa YG |
6406 | { |
6407 | int err; | |
bbe21d7a | 6408 | size_t buff_len; |
c58ab7aa | 6409 | u8 model_index; |
bbe21d7a KC |
6410 | u8 *desc_buf; |
6411 | ||
6412 | buff_len = max_t(size_t, hba->desc_size.dev_desc, | |
6413 | QUERY_DESC_MAX_SIZE + 1); | |
6414 | desc_buf = kmalloc(buff_len, GFP_KERNEL); | |
6415 | if (!desc_buf) { | |
6416 | err = -ENOMEM; | |
6417 | goto out; | |
6418 | } | |
c58ab7aa | 6419 | |
a4b0e8a4 | 6420 | err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); |
c58ab7aa YG |
6421 | if (err) { |
6422 | dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", | |
6423 | __func__, err); | |
6424 | goto out; | |
6425 | } | |
6426 | ||
6427 | /* | |
6428 | * getting vendor (manufacturerID) and Bank Index in big endian | |
6429 | * format | |
6430 | */ | |
93fdd5ac | 6431 | dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | |
c58ab7aa YG |
6432 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; |
6433 | ||
6434 | model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; | |
6435 | ||
bbe21d7a KC |
6436 | /* Zero-pad entire buffer for string termination. */ |
6437 | memset(desc_buf, 0, buff_len); | |
6438 | ||
6439 | err = ufshcd_read_string_desc(hba, model_index, desc_buf, | |
8aa29f19 | 6440 | QUERY_DESC_MAX_SIZE, true/*ASCII*/); |
c58ab7aa YG |
6441 | if (err) { |
6442 | dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", | |
6443 | __func__, err); | |
6444 | goto out; | |
6445 | } | |
6446 | ||
bbe21d7a KC |
6447 | desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; |
6448 | strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE), | |
6449 | min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], | |
c58ab7aa YG |
6450 | MAX_MODEL_LEN)); |
6451 | ||
6452 | /* Null terminate the model string */ | |
93fdd5ac | 6453 | dev_desc->model[MAX_MODEL_LEN] = '\0'; |
c58ab7aa YG |
6454 | |
6455 | out: | |
bbe21d7a | 6456 | kfree(desc_buf); |
c58ab7aa YG |
6457 | return err; |
6458 | } | |
6459 | ||
93fdd5ac TW |
6460 | static void ufs_fixup_device_setup(struct ufs_hba *hba, |
6461 | struct ufs_dev_desc *dev_desc) | |
c58ab7aa | 6462 | { |
c58ab7aa | 6463 | struct ufs_dev_fix *f; |
c58ab7aa YG |
6464 | |
6465 | for (f = ufs_fixups; f->quirk; f++) { | |
93fdd5ac TW |
6466 | if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || |
6467 | f->card.wmanufacturerid == UFS_ANY_VENDOR) && | |
6468 | (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || | |
c58ab7aa YG |
6469 | !strcmp(f->card.model, UFS_ANY_MODEL))) |
6470 | hba->dev_quirks |= f->quirk; | |
6471 | } | |
6472 | } | |
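
/*
 * For illustration only: entries in the ufs_fixups[] table matched above are
 * typically declared with the UFS_FIX() helper from ufs_quirks.h. A
 * hypothetical UFS_FIX(UFS_VENDOR_FOO, UFS_ANY_MODEL,
 * UFS_DEVICE_QUIRK_PA_TACTIVATE) entry, with a made-up vendor ID, would apply
 * that quirk to every model reported by that vendor.
 */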
6473 | ||
37113106 YG |
6474 | /** |
6475 | * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro | |
6476 | * @hba: per-adapter instance | |
6477 | * | |
6478 | * PA_TActivate parameter can be tuned manually if UniPro version is less than | |
6479 | * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's | |
6480 | * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce | |
6481 | * the hibern8 exit latency. | |
6482 | * | |
6483 | * Returns zero on success, non-zero error value on failure. | |
6484 | */ | |
6485 | static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) | |
6486 | { | |
6487 | int ret = 0; | |
6488 | u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; | |
6489 | ||
6490 | ret = ufshcd_dme_peer_get(hba, | |
6491 | UIC_ARG_MIB_SEL( | |
6492 | RX_MIN_ACTIVATETIME_CAPABILITY, | |
6493 | UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), | |
6494 | &peer_rx_min_activatetime); | |
6495 | if (ret) | |
6496 | goto out; | |
6497 | ||
6498 | /* make sure proper unit conversion is applied */ | |
6499 | tuned_pa_tactivate = | |
6500 | ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) | |
6501 | / PA_TACTIVATE_TIME_UNIT_US); | |
6502 | ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), | |
6503 | tuned_pa_tactivate); | |
6504 | ||
6505 | out: | |
6506 | return ret; | |
6507 | } | |
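
/*
 * Worked example (unit constants assumed for illustration): if the peer
 * reports RX_MIN_ACTIVATETIME_CAPABILITY = 2, and RX_MIN_ACTIVATETIME_UNIT_US
 * were 100 us while PA_TACTIVATE_TIME_UNIT_US were 10 us, the conversion
 * above would program PA_TACTIVATE = (2 * 100) / 10 = 20, i.e. the same
 * 200 us re-expressed in PA_TActivate units.
 */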
6508 | ||
6509 | /** | |
6510 | * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro | |
6511 | * @hba: per-adapter instance | |
6512 | * | |
6513 | * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than | |
6514 | * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's | |
6515 | * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY. | |
6516 | * This optimal value can help reduce the hibern8 exit latency. | |
6517 | * | |
6518 | * Returns zero on success, non-zero error value on failure. | |
6519 | */ | |
6520 | static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) | |
6521 | { | |
6522 | int ret = 0; | |
6523 | u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; | |
6524 | u32 max_hibern8_time, tuned_pa_hibern8time; | |
6525 | ||
6526 | ret = ufshcd_dme_get(hba, | |
6527 | UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, | |
6528 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), | |
6529 | &local_tx_hibern8_time_cap); | |
6530 | if (ret) | |
6531 | goto out; | |
6532 | ||
6533 | ret = ufshcd_dme_peer_get(hba, | |
6534 | UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, | |
6535 | UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), | |
6536 | &peer_rx_hibern8_time_cap); | |
6537 | if (ret) | |
6538 | goto out; | |
6539 | ||
6540 | max_hibern8_time = max(local_tx_hibern8_time_cap, | |
6541 | peer_rx_hibern8_time_cap); | |
6542 | /* make sure proper unit conversion is applied */ | |
6543 | tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) | |
6544 | / PA_HIBERN8_TIME_UNIT_US); | |
6545 | ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), | |
6546 | tuned_pa_hibern8time); | |
6547 | out: | |
6548 | return ret; | |
6549 | } | |
6550 | ||
c6a6db43 SJ |
6551 | /** |
6552 | * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is | |
6553 | * less than device PA_TACTIVATE time. | |
6554 | * @hba: per-adapter instance | |
6555 | * | |
6556 | * Some UFS devices require host PA_TACTIVATE to be lower than device | |
6557 | * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk | |
6558 | * for such devices. | |
6559 | * | |
6560 | * Returns zero on success, non-zero error value on failure. | |
6561 | */ | |
6562 | static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) | |
6563 | { | |
6564 | int ret = 0; | |
6565 | u32 granularity, peer_granularity; | |
6566 | u32 pa_tactivate, peer_pa_tactivate; | |
6567 | u32 pa_tactivate_us, peer_pa_tactivate_us; | |
6568 | u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100}; | |
6569 | ||
6570 | ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), | |
6571 | &granularity); | |
6572 | if (ret) | |
6573 | goto out; | |
6574 | ||
6575 | ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), | |
6576 | &peer_granularity); | |
6577 | if (ret) | |
6578 | goto out; | |
6579 | ||
6580 | if ((granularity < PA_GRANULARITY_MIN_VAL) || | |
6581 | (granularity > PA_GRANULARITY_MAX_VAL)) { | |
6582 | dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", | |
6583 | __func__, granularity); | |
6584 | return -EINVAL; | |
6585 | } | |
6586 | ||
6587 | if ((peer_granularity < PA_GRANULARITY_MIN_VAL) || | |
6588 | (peer_granularity > PA_GRANULARITY_MAX_VAL)) { | |
6589 | dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", | |
6590 | __func__, peer_granularity); | |
6591 | return -EINVAL; | |
6592 | } | |
6593 | ||
6594 | ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); | |
6595 | if (ret) | |
6596 | goto out; | |
6597 | ||
6598 | ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), | |
6599 | &peer_pa_tactivate); | |
6600 | if (ret) | |
6601 | goto out; | |
6602 | ||
6603 | pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1]; | |
6604 | peer_pa_tactivate_us = peer_pa_tactivate * | |
6605 | gran_to_us_table[peer_granularity - 1]; | |
6606 | ||
6607 | if (pa_tactivate_us > peer_pa_tactivate_us) { | |
6608 | u32 new_peer_pa_tactivate; | |
6609 | ||
6610 | new_peer_pa_tactivate = pa_tactivate_us / | |
6611 | gran_to_us_table[peer_granularity - 1]; | |
6612 | new_peer_pa_tactivate++; | |
6613 | ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), | |
6614 | new_peer_pa_tactivate); | |
6615 | } | |
6616 | ||
6617 | out: | |
6618 | return ret; | |
6619 | } | |
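
/*
 * Worked example (attribute values assumed): with host PA_GRANULARITY = 3
 * (8 us per step, per gran_to_us_table[] above) and host PA_TACTIVATE = 2,
 * the host activate time is 16 us. If the peer uses PA_GRANULARITY = 1
 * (1 us per step) and a smaller activate time, its PA_TACTIVATE is raised to
 * 16 / 1 + 1 = 17 so that the device-side time ends up longer than the
 * host-side time.
 */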
6620 | ||
37113106 YG |
6621 | static void ufshcd_tune_unipro_params(struct ufs_hba *hba) |
6622 | { | |
6623 | if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { | |
6624 | ufshcd_tune_pa_tactivate(hba); | |
6625 | ufshcd_tune_pa_hibern8time(hba); | |
6626 | } | |
6627 | ||
6628 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) | |
6629 | /* set 1ms timeout for PA_TACTIVATE */ | |
6630 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); | |
c6a6db43 SJ |
6631 | |
6632 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) | |
6633 | ufshcd_quirk_tune_host_pa_tactivate(hba); | |
56d4a186 SJ |
6634 | |
6635 | ufshcd_vops_apply_dev_quirks(hba); | |
37113106 YG |
6636 | } |
6637 | ||
ff8e20c6 DR |
6638 | static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) |
6639 | { | |
6640 | int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist); | |
6641 | ||
6642 | hba->ufs_stats.hibern8_exit_cnt = 0; | |
6643 | hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); | |
6644 | ||
6645 | memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size); | |
6646 | memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size); | |
6647 | memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size); | |
6648 | memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size); | |
6649 | memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size); | |
7fabb77b GB |
6650 | |
6651 | hba->req_abort_count = 0; | |
ff8e20c6 DR |
6652 | } |
6653 | ||
a4b0e8a4 PM |
6654 | static void ufshcd_init_desc_sizes(struct ufs_hba *hba) |
6655 | { | |
6656 | int err; | |
6657 | ||
6658 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, | |
6659 | &hba->desc_size.dev_desc); | |
6660 | if (err) | |
6661 | hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; | |
6662 | ||
6663 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, | |
6664 | &hba->desc_size.pwr_desc); | |
6665 | if (err) | |
6666 | hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; | |
6667 | ||
6668 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, | |
6669 | &hba->desc_size.interc_desc); | |
6670 | if (err) | |
6671 | hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; | |
6672 | ||
6673 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, | |
6674 | &hba->desc_size.conf_desc); | |
6675 | if (err) | |
6676 | hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; | |
6677 | ||
6678 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, | |
6679 | &hba->desc_size.unit_desc); | |
6680 | if (err) | |
6681 | hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; | |
6682 | ||
6683 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, | |
6684 | &hba->desc_size.geom_desc); | |
6685 | if (err) | |
6686 | hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; | |
c648c2d2 SN |
6687 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, |
6688 | &hba->desc_size.hlth_desc); | |
6689 | if (err) | |
6690 | hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; | |
a4b0e8a4 PM |
6691 | } |
6692 | ||
6693 | static void ufshcd_def_desc_sizes(struct ufs_hba *hba) | |
6694 | { | |
6695 | hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; | |
6696 | hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; | |
6697 | hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; | |
6698 | hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; | |
6699 | hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; | |
6700 | hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; | |
c648c2d2 | 6701 | hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; |
a4b0e8a4 PM |
6702 | } |
6703 | ||
6ccf44fe | 6704 | /** |
1d337ec2 SRT |
6705 | * ufshcd_probe_hba - probe hba to detect device and initialize |
6706 | * @hba: per-adapter instance | |
6707 | * | |
6708 | * Execute link-startup and verify device initialization | |
6ccf44fe | 6709 | */ |
1d337ec2 | 6710 | static int ufshcd_probe_hba(struct ufs_hba *hba) |
6ccf44fe | 6711 | { |
93fdd5ac | 6712 | struct ufs_dev_desc card = {0}; |
6ccf44fe | 6713 | int ret; |
7ff5ab47 | 6714 | ktime_t start = ktime_get(); |
6ccf44fe SJ |
6715 | |
6716 | ret = ufshcd_link_startup(hba); | |
5a0b0cb9 SRT |
6717 | if (ret) |
6718 | goto out; | |
6719 | ||
afdfff59 YG |
6720 | /* set the default level for urgent bkops */ |
6721 | hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; | |
6722 | hba->is_urgent_bkops_lvl_checked = false; | |
6723 | ||
ff8e20c6 DR |
6724 | /* Debug counters initialization */ |
6725 | ufshcd_clear_dbg_ufs_stats(hba); | |
6726 | ||
57d104c1 SJ |
6727 | /* UniPro link is active now */ |
6728 | ufshcd_set_link_active(hba); | |
d3e89bac | 6729 | |
ad448378 AH |
6730 | /* Enable Auto-Hibernate if configured */ |
6731 | ufshcd_auto_hibern8_enable(hba); | |
6732 | ||
5a0b0cb9 SRT |
6733 | ret = ufshcd_verify_dev_init(hba); |
6734 | if (ret) | |
6735 | goto out; | |
68078d5c DR |
6736 | |
6737 | ret = ufshcd_complete_dev_init(hba); | |
6738 | if (ret) | |
6739 | goto out; | |
5a0b0cb9 | 6740 | |
a4b0e8a4 PM |
6741 | /* Init check for device descriptor sizes */ |
6742 | ufshcd_init_desc_sizes(hba); | |
6743 | ||
93fdd5ac TW |
6744 | ret = ufs_get_device_desc(hba, &card); |
6745 | if (ret) { | |
6746 | dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", | |
6747 | __func__, ret); | |
6748 | goto out; | |
6749 | } | |
6750 | ||
6751 | ufs_fixup_device_setup(hba, &card); | |
37113106 | 6752 | ufshcd_tune_unipro_params(hba); |
60f01870 YG |
6753 | |
6754 | ret = ufshcd_set_vccq_rail_unused(hba, | |
6755 | (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false); | |
6756 | if (ret) | |
6757 | goto out; | |
6758 | ||
57d104c1 SJ |
6759 | /* UFS device is also active now */ |
6760 | ufshcd_set_ufs_dev_active(hba); | |
66ec6d59 | 6761 | ufshcd_force_reset_auto_bkops(hba); |
57d104c1 SJ |
6762 | hba->wlun_dev_clr_ua = true; |
6763 | ||
7eb584db DR |
6764 | if (ufshcd_get_max_pwr_mode(hba)) { |
6765 | dev_err(hba->dev, | |
6766 | "%s: Failed getting max supported power mode\n", | |
6767 | __func__); | |
6768 | } else { | |
6769 | ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); | |
8643ae66 | 6770 | if (ret) { |
7eb584db DR |
6771 | dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", |
6772 | __func__, ret); | |
8643ae66 DL |
6773 | goto out; |
6774 | } | |
7eb584db | 6775 | } |
57d104c1 | 6776 | |
53c12d0e YG |
6777 | /* set the state as operational after switching to desired gear */ |
6778 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | |
a4b0e8a4 | 6779 | |
57d104c1 SJ |
6780 | /* |
6781 | * If we are in error handling context or in power management callbacks | |
6782 | * context, no need to scan the host | |
6783 | */ | |
6784 | if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { | |
6785 | bool flag; | |
6786 | ||
6787 | /* clear any previous UFS device information */ | |
6788 | memset(&hba->dev_info, 0, sizeof(hba->dev_info)); | |
dc3c8d3a YG |
6789 | if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, |
6790 | QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) | |
57d104c1 | 6791 | hba->dev_info.f_power_on_wp_en = flag; |
3441da7d | 6792 | |
3a4bf06d YG |
6793 | if (!hba->is_init_prefetch) |
6794 | ufshcd_init_icc_levels(hba); | |
6795 | ||
2a8fa600 SJ |
6796 | /* Add required well known logical units to scsi mid layer */ |
6797 | if (ufshcd_scsi_add_wlus(hba)) | |
6798 | goto out; | |
6799 | ||
0701e49d SJ |
6800 | /* Initialize devfreq after UFS device is detected */ |
6801 | if (ufshcd_is_clkscaling_supported(hba)) { | |
6802 | memcpy(&hba->clk_scaling.saved_pwr_info.info, | |
6803 | &hba->pwr_info, | |
6804 | sizeof(struct ufs_pa_layer_attr)); | |
6805 | hba->clk_scaling.saved_pwr_info.is_valid = true; | |
6806 | if (!hba->devfreq) { | |
deac444f BA |
6807 | ret = ufshcd_devfreq_init(hba); |
6808 | if (ret) | |
0701e49d | 6809 | goto out; |
0701e49d SJ |
6810 | } |
6811 | hba->clk_scaling.is_allowed = true; | |
6812 | } | |
6813 | ||
df032bf2 AA |
6814 | ufs_bsg_probe(hba); |
6815 | ||
3441da7d SRT |
6816 | scsi_scan_host(hba->host); |
6817 | pm_runtime_put_sync(hba->dev); | |
6818 | } | |
3a4bf06d YG |
6819 | |
6820 | if (!hba->is_init_prefetch) | |
6821 | hba->is_init_prefetch = true; | |
6822 | ||
5a0b0cb9 | 6823 | out: |
1d337ec2 SRT |
6824 | /* |
6825 | * If we failed to initialize the device or the device is not | |
6826 | * present, turn off the power/clocks etc. | |
6827 | */ | |
57d104c1 SJ |
6828 | if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { |
6829 | pm_runtime_put_sync(hba->dev); | |
eebcc196 | 6830 | ufshcd_exit_clk_scaling(hba); |
1d337ec2 | 6831 | ufshcd_hba_exit(hba); |
57d104c1 | 6832 | } |
1d337ec2 | 6833 | |
7ff5ab47 SJ |
6834 | trace_ufshcd_init(dev_name(hba->dev), ret, |
6835 | ktime_to_us(ktime_sub(ktime_get(), start)), | |
73eba2be | 6836 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
1d337ec2 SRT |
6837 | return ret; |
6838 | } | |
6839 | ||
6840 | /** | |
6841 | * ufshcd_async_scan - asynchronous execution for probing hba | |
6842 | * @data: data pointer to pass to this function | |
6843 | * @cookie: cookie data | |
6844 | */ | |
6845 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) | |
6846 | { | |
6847 | struct ufs_hba *hba = (struct ufs_hba *)data; | |
6848 | ||
6849 | ufshcd_probe_hba(hba); | |
6ccf44fe SJ |
6850 | } |
6851 | ||
f550c65b YG |
6852 | static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) |
6853 | { | |
6854 | unsigned long flags; | |
6855 | struct Scsi_Host *host; | |
6856 | struct ufs_hba *hba; | |
6857 | int index; | |
6858 | bool found = false; | |
6859 | ||
6860 | if (!scmd || !scmd->device || !scmd->device->host) | |
6600593c | 6861 | return BLK_EH_DONE; |
f550c65b YG |
6862 | |
6863 | host = scmd->device->host; | |
6864 | hba = shost_priv(host); | |
6865 | if (!hba) | |
6600593c | 6866 | return BLK_EH_DONE; |
f550c65b YG |
6867 | |
6868 | spin_lock_irqsave(host->host_lock, flags); | |
6869 | ||
6870 | for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { | |
6871 | if (hba->lrb[index].cmd == scmd) { | |
6872 | found = true; | |
6873 | break; | |
6874 | } | |
6875 | } | |
6876 | ||
6877 | spin_unlock_irqrestore(host->host_lock, flags); | |
6878 | ||
6879 | /* | |
6880 | * Bypass SCSI error handling and reset the block layer timer if this | |
6881 | * SCSI command was not actually dispatched to the UFS driver; otherwise | |
6882 | * let the SCSI layer handle the error as usual. | |
6883 | */ | |
6600593c | 6884 | return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER; |
f550c65b YG |
6885 | } |
6886 | ||
d829fc8a SN |
6887 | static const struct attribute_group *ufshcd_driver_groups[] = { |
6888 | &ufs_sysfs_unit_descriptor_group, | |
ec92b59c | 6889 | &ufs_sysfs_lun_attributes_group, |
d829fc8a SN |
6890 | NULL, |
6891 | }; | |
6892 | ||
7a3e97b0 SY |
6893 | static struct scsi_host_template ufshcd_driver_template = { |
6894 | .module = THIS_MODULE, | |
6895 | .name = UFSHCD, | |
6896 | .proc_name = UFSHCD, | |
6897 | .queuecommand = ufshcd_queuecommand, | |
6898 | .slave_alloc = ufshcd_slave_alloc, | |
eeda4749 | 6899 | .slave_configure = ufshcd_slave_configure, |
7a3e97b0 | 6900 | .slave_destroy = ufshcd_slave_destroy, |
4264fd61 | 6901 | .change_queue_depth = ufshcd_change_queue_depth, |
7a3e97b0 | 6902 | .eh_abort_handler = ufshcd_abort, |
3441da7d SRT |
6903 | .eh_device_reset_handler = ufshcd_eh_device_reset_handler, |
6904 | .eh_host_reset_handler = ufshcd_eh_host_reset_handler, | |
f550c65b | 6905 | .eh_timed_out = ufshcd_eh_timed_out, |
7a3e97b0 SY |
6906 | .this_id = -1, |
6907 | .sg_tablesize = SG_ALL, | |
6908 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | |
6909 | .can_queue = UFSHCD_CAN_QUEUE, | |
1ab27c9c | 6910 | .max_host_blocked = 1, |
c40ecc12 | 6911 | .track_queue_depth = 1, |
d829fc8a | 6912 | .sdev_groups = ufshcd_driver_groups, |
7a3e97b0 SY |
6913 | }; |
6914 | ||
57d104c1 SJ |
6915 | static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, |
6916 | int ua) | |
6917 | { | |
7b16a07c | 6918 | int ret; |
57d104c1 | 6919 | |
7b16a07c BA |
6920 | if (!vreg) |
6921 | return 0; | |
57d104c1 | 6922 | |
7b16a07c BA |
6923 | ret = regulator_set_load(vreg->reg, ua); |
6924 | if (ret < 0) { | |
6925 | dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", | |
6926 | __func__, vreg->name, ua, ret); | |
57d104c1 SJ |
6927 | } |
6928 | ||
6929 | return ret; | |
6930 | } | |
6931 | ||
6932 | static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, | |
6933 | struct ufs_vreg *vreg) | |
6934 | { | |
60f01870 YG |
6935 | if (!vreg) |
6936 | return 0; | |
6937 | else if (vreg->unused) | |
6938 | return 0; | |
6939 | else | |
6940 | return ufshcd_config_vreg_load(hba->dev, vreg, | |
6941 | UFS_VREG_LPM_LOAD_UA); | |
57d104c1 SJ |
6942 | } |
6943 | ||
6944 | static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, | |
6945 | struct ufs_vreg *vreg) | |
6946 | { | |
60f01870 YG |
6947 | if (!vreg) |
6948 | return 0; | |
6949 | else if (vreg->unused) | |
6950 | return 0; | |
6951 | else | |
6952 | return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); | |
57d104c1 SJ |
6953 | } |
6954 | ||
aa497613 SRT |
6955 | static int ufshcd_config_vreg(struct device *dev, |
6956 | struct ufs_vreg *vreg, bool on) | |
6957 | { | |
6958 | int ret = 0; | |
72753590 GS |
6959 | struct regulator *reg; |
6960 | const char *name; | |
aa497613 SRT |
6961 | int min_uV, uA_load; |
6962 | ||
6963 | BUG_ON(!vreg); | |
6964 | ||
72753590 GS |
6965 | reg = vreg->reg; |
6966 | name = vreg->name; | |
6967 | ||
aa497613 SRT |
6968 | if (regulator_count_voltages(reg) > 0) { |
6969 | min_uV = on ? vreg->min_uV : 0; | |
6970 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | |
6971 | if (ret) { | |
6972 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | |
6973 | __func__, name, ret); | |
6974 | goto out; | |
6975 | } | |
6976 | ||
6977 | uA_load = on ? vreg->max_uA : 0; | |
57d104c1 SJ |
6978 | ret = ufshcd_config_vreg_load(dev, vreg, uA_load); |
6979 | if (ret) | |
aa497613 | 6980 | goto out; |
aa497613 SRT |
6981 | } |
6982 | out: | |
6983 | return ret; | |
6984 | } | |
6985 | ||
6986 | static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) | |
6987 | { | |
6988 | int ret = 0; | |
6989 | ||
60f01870 YG |
6990 | if (!vreg) |
6991 | goto out; | |
6992 | else if (vreg->enabled || vreg->unused) | |
aa497613 SRT |
6993 | goto out; |
6994 | ||
6995 | ret = ufshcd_config_vreg(dev, vreg, true); | |
6996 | if (!ret) | |
6997 | ret = regulator_enable(vreg->reg); | |
6998 | ||
6999 | if (!ret) | |
7000 | vreg->enabled = true; | |
7001 | else | |
7002 | dev_err(dev, "%s: %s enable failed, err=%d\n", | |
7003 | __func__, vreg->name, ret); | |
7004 | out: | |
7005 | return ret; | |
7006 | } | |
7007 | ||
7008 | static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) | |
7009 | { | |
7010 | int ret = 0; | |
7011 | ||
60f01870 YG |
7012 | if (!vreg) |
7013 | goto out; | |
7014 | else if (!vreg->enabled || vreg->unused) | |
aa497613 SRT |
7015 | goto out; |
7016 | ||
7017 | ret = regulator_disable(vreg->reg); | |
7018 | ||
7019 | if (!ret) { | |
7020 | /* ignore errors on applying disable config */ | |
7021 | ufshcd_config_vreg(dev, vreg, false); | |
7022 | vreg->enabled = false; | |
7023 | } else { | |
7024 | dev_err(dev, "%s: %s disable failed, err=%d\n", | |
7025 | __func__, vreg->name, ret); | |
7026 | } | |
7027 | out: | |
7028 | return ret; | |
7029 | } | |
7030 | ||
7031 | static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) | |
7032 | { | |
7033 | int ret = 0; | |
7034 | struct device *dev = hba->dev; | |
7035 | struct ufs_vreg_info *info = &hba->vreg_info; | |
7036 | ||
7037 | if (!info) | |
7038 | goto out; | |
7039 | ||
7040 | ret = ufshcd_toggle_vreg(dev, info->vcc, on); | |
7041 | if (ret) | |
7042 | goto out; | |
7043 | ||
7044 | ret = ufshcd_toggle_vreg(dev, info->vccq, on); | |
7045 | if (ret) | |
7046 | goto out; | |
7047 | ||
7048 | ret = ufshcd_toggle_vreg(dev, info->vccq2, on); | |
7049 | if (ret) | |
7050 | goto out; | |
7051 | ||
7052 | out: | |
7053 | if (ret) { | |
7054 | ufshcd_toggle_vreg(dev, info->vccq2, false); | |
7055 | ufshcd_toggle_vreg(dev, info->vccq, false); | |
7056 | ufshcd_toggle_vreg(dev, info->vcc, false); | |
7057 | } | |
7058 | return ret; | |
7059 | } | |
7060 | ||
6a771a65 RS |
7061 | static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) |
7062 | { | |
7063 | struct ufs_vreg_info *info = &hba->vreg_info; | |
7064 | ||
7065 | if (info) | |
7066 | return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); | |
7067 | ||
7068 | return 0; | |
7069 | } | |
7070 | ||
aa497613 SRT |
7071 | static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) |
7072 | { | |
7073 | int ret = 0; | |
7074 | ||
7075 | if (!vreg) | |
7076 | goto out; | |
7077 | ||
7078 | vreg->reg = devm_regulator_get(dev, vreg->name); | |
7079 | if (IS_ERR(vreg->reg)) { | |
7080 | ret = PTR_ERR(vreg->reg); | |
7081 | dev_err(dev, "%s: %s get failed, err=%d\n", | |
7082 | __func__, vreg->name, ret); | |
7083 | } | |
7084 | out: | |
7085 | return ret; | |
7086 | } | |
7087 | ||
7088 | static int ufshcd_init_vreg(struct ufs_hba *hba) | |
7089 | { | |
7090 | int ret = 0; | |
7091 | struct device *dev = hba->dev; | |
7092 | struct ufs_vreg_info *info = &hba->vreg_info; | |
7093 | ||
7094 | if (!info) | |
7095 | goto out; | |
7096 | ||
7097 | ret = ufshcd_get_vreg(dev, info->vcc); | |
7098 | if (ret) | |
7099 | goto out; | |
7100 | ||
7101 | ret = ufshcd_get_vreg(dev, info->vccq); | |
7102 | if (ret) | |
7103 | goto out; | |
7104 | ||
7105 | ret = ufshcd_get_vreg(dev, info->vccq2); | |
7106 | out: | |
7107 | return ret; | |
7108 | } | |
7109 | ||
6a771a65 RS |
7110 | static int ufshcd_init_hba_vreg(struct ufs_hba *hba) |
7111 | { | |
7112 | struct ufs_vreg_info *info = &hba->vreg_info; | |
7113 | ||
7114 | if (info) | |
7115 | return ufshcd_get_vreg(hba->dev, info->vdd_hba); | |
7116 | ||
7117 | return 0; | |
7118 | } | |
7119 | ||
60f01870 YG |
7120 | static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused) |
7121 | { | |
7122 | int ret = 0; | |
7123 | struct ufs_vreg_info *info = &hba->vreg_info; | |
7124 | ||
7125 | if (!info) | |
7126 | goto out; | |
7127 | else if (!info->vccq) | |
7128 | goto out; | |
7129 | ||
7130 | if (unused) { | |
7131 | /* shut off the rail here */ | |
7132 | ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false); | |
7133 | /* | |
7134 | * Mark this rail as no longer used, so it doesn't get enabled | |
7135 | * later by mistake | |
7136 | */ | |
7137 | if (!ret) | |
7138 | info->vccq->unused = true; | |
7139 | } else { | |
7140 | /* | |
7141 | * The rail should already have been enabled, hence just make sure | |
7142 | * that the unused flag is cleared. | |
7143 | */ | |
7144 | info->vccq->unused = false; | |
7145 | } | |
7146 | out: | |
7147 | return ret; | |
7148 | } | |
7149 | ||
57d104c1 SJ |
7150 | static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, |
7151 | bool skip_ref_clk) | |
c6e79dac SRT |
7152 | { |
7153 | int ret = 0; | |
7154 | struct ufs_clk_info *clki; | |
7155 | struct list_head *head = &hba->clk_list_head; | |
1ab27c9c | 7156 | unsigned long flags; |
911a0771 SJ |
7157 | ktime_t start = ktime_get(); |
7158 | bool clk_state_changed = false; | |
c6e79dac | 7159 | |
566ec9ad | 7160 | if (list_empty(head)) |
c6e79dac SRT |
7161 | goto out; |
7162 | ||
b334456e SJ |
7163 | /* |
7164 | * Vendor specific setup_clocks ops may depend on clocks managed by | |
7165 | * this standard driver, hence call the vendor specific setup_clocks | |
7166 | * before disabling the clocks managed here. | |
7167 | */ | |
7168 | if (!on) { | |
7169 | ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); | |
7170 | if (ret) | |
7171 | return ret; | |
7172 | } | |
1e879e8f | 7173 | |
c6e79dac SRT |
7174 | list_for_each_entry(clki, head, list) { |
7175 | if (!IS_ERR_OR_NULL(clki->clk)) { | |
57d104c1 SJ |
7176 | if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) |
7177 | continue; | |
7178 | ||
911a0771 | 7179 | clk_state_changed = on ^ clki->enabled; |
c6e79dac SRT |
7180 | if (on && !clki->enabled) { |
7181 | ret = clk_prepare_enable(clki->clk); | |
7182 | if (ret) { | |
7183 | dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", | |
7184 | __func__, clki->name, ret); | |
7185 | goto out; | |
7186 | } | |
7187 | } else if (!on && clki->enabled) { | |
7188 | clk_disable_unprepare(clki->clk); | |
7189 | } | |
7190 | clki->enabled = on; | |
7191 | dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, | |
7192 | clki->name, on ? "en" : "dis"); | |
7193 | } | |
7194 | } | |
1ab27c9c | 7195 | |
b334456e SJ |
7196 | /* |
7197 | * Vendor specific setup_clocks ops may depend on clocks managed by | |
7198 | * this standard driver, hence call the vendor specific setup_clocks | |
7199 | * after enabling the clocks managed here. | |
7200 | */ | |
7201 | if (on) { | |
7202 | ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); | |
7203 | if (ret) | |
7204 | return ret; | |
7205 | } | |
1e879e8f | 7206 | |
c6e79dac SRT |
7207 | out: |
7208 | if (ret) { | |
7209 | list_for_each_entry(clki, head, list) { | |
7210 | if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) | |
7211 | clk_disable_unprepare(clki->clk); | |
7212 | } | |
7ff5ab47 | 7213 | } else if (!ret && on) { |
1ab27c9c ST |
7214 | spin_lock_irqsave(hba->host->host_lock, flags); |
7215 | hba->clk_gating.state = CLKS_ON; | |
7ff5ab47 SJ |
7216 | trace_ufshcd_clk_gating(dev_name(hba->dev), |
7217 | hba->clk_gating.state); | |
1ab27c9c | 7218 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
c6e79dac | 7219 | } |
7ff5ab47 | 7220 | |
911a0771 SJ |
7221 | if (clk_state_changed) |
7222 | trace_ufshcd_profile_clk_gating(dev_name(hba->dev), | |
7223 | (on ? "on" : "off"), | |
7224 | ktime_to_us(ktime_sub(ktime_get(), start)), ret); | |
c6e79dac SRT |
7225 | return ret; |
7226 | } | |
7227 | ||
57d104c1 SJ |
7228 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) |
7229 | { | |
7230 | return __ufshcd_setup_clocks(hba, on, false); | |
7231 | } | |
7232 | ||
c6e79dac SRT |
7233 | static int ufshcd_init_clocks(struct ufs_hba *hba) |
7234 | { | |
7235 | int ret = 0; | |
7236 | struct ufs_clk_info *clki; | |
7237 | struct device *dev = hba->dev; | |
7238 | struct list_head *head = &hba->clk_list_head; | |
7239 | ||
566ec9ad | 7240 | if (list_empty(head)) |
c6e79dac SRT |
7241 | goto out; |
7242 | ||
7243 | list_for_each_entry(clki, head, list) { | |
7244 | if (!clki->name) | |
7245 | continue; | |
7246 | ||
7247 | clki->clk = devm_clk_get(dev, clki->name); | |
7248 | if (IS_ERR(clki->clk)) { | |
7249 | ret = PTR_ERR(clki->clk); | |
7250 | dev_err(dev, "%s: %s clk get failed, %d\n", | |
7251 | __func__, clki->name, ret); | |
7252 | goto out; | |
7253 | } | |
7254 | ||
7255 | if (clki->max_freq) { | |
7256 | ret = clk_set_rate(clki->clk, clki->max_freq); | |
7257 | if (ret) { | |
7258 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
7259 | __func__, clki->name, | |
7260 | clki->max_freq, ret); | |
7261 | goto out; | |
7262 | } | |
856b3483 | 7263 | clki->curr_freq = clki->max_freq; |
c6e79dac SRT |
7264 | } |
7265 | dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, | |
7266 | clki->name, clk_get_rate(clki->clk)); | |
7267 | } | |
7268 | out: | |
7269 | return ret; | |
7270 | } | |
7271 | ||
5c0c28a8 SRT |
7272 | static int ufshcd_variant_hba_init(struct ufs_hba *hba) |
7273 | { | |
7274 | int err = 0; | |
7275 | ||
7276 | if (!hba->vops) | |
7277 | goto out; | |
7278 | ||
0263bcd0 YG |
7279 | err = ufshcd_vops_init(hba); |
7280 | if (err) | |
7281 | goto out; | |
5c0c28a8 | 7282 | |
0263bcd0 YG |
7283 | err = ufshcd_vops_setup_regulators(hba, true); |
7284 | if (err) | |
7285 | goto out_exit; | |
5c0c28a8 SRT |
7286 | |
7287 | goto out; | |
7288 | ||
5c0c28a8 | 7289 | out_exit: |
0263bcd0 | 7290 | ufshcd_vops_exit(hba); |
5c0c28a8 SRT |
7291 | out: |
7292 | if (err) | |
7293 | dev_err(hba->dev, "%s: variant %s init failed err %d\n", | |
0263bcd0 | 7294 | __func__, ufshcd_get_var_name(hba), err); |
5c0c28a8 SRT |
7295 | return err; |
7296 | } | |
7297 | ||
7298 | static void ufshcd_variant_hba_exit(struct ufs_hba *hba) | |
7299 | { | |
7300 | if (!hba->vops) | |
7301 | return; | |
7302 | ||
0263bcd0 | 7303 | ufshcd_vops_setup_regulators(hba, false); |
5c0c28a8 | 7304 | |
0263bcd0 | 7305 | ufshcd_vops_exit(hba); |
5c0c28a8 SRT |
7306 | } |
7307 | ||
aa497613 SRT |
7308 | static int ufshcd_hba_init(struct ufs_hba *hba) |
7309 | { | |
7310 | int err; | |
7311 | ||
6a771a65 RS |
7312 | /* |
7313 | * Handle host controller power separately from the UFS device power | |
7314 | * rails, as it helps control UFS host controller power collapse
7315 | * easily, which is different from UFS device power collapse.
7316 | * Also, enable the host controller power before we go ahead with rest | |
7317 | * of the initialization here. | |
7318 | */ | |
7319 | err = ufshcd_init_hba_vreg(hba); | |
aa497613 SRT |
7320 | if (err) |
7321 | goto out; | |
7322 | ||
6a771a65 | 7323 | err = ufshcd_setup_hba_vreg(hba, true); |
aa497613 SRT |
7324 | if (err) |
7325 | goto out; | |
7326 | ||
6a771a65 RS |
7327 | err = ufshcd_init_clocks(hba); |
7328 | if (err) | |
7329 | goto out_disable_hba_vreg; | |
7330 | ||
7331 | err = ufshcd_setup_clocks(hba, true); | |
7332 | if (err) | |
7333 | goto out_disable_hba_vreg; | |
7334 | ||
c6e79dac SRT |
7335 | err = ufshcd_init_vreg(hba); |
7336 | if (err) | |
7337 | goto out_disable_clks; | |
7338 | ||
7339 | err = ufshcd_setup_vreg(hba, true); | |
7340 | if (err) | |
7341 | goto out_disable_clks; | |
7342 | ||
aa497613 SRT |
7343 | err = ufshcd_variant_hba_init(hba); |
7344 | if (err) | |
7345 | goto out_disable_vreg; | |
7346 | ||
1d337ec2 | 7347 | hba->is_powered = true; |
aa497613 SRT |
7348 | goto out; |
7349 | ||
7350 | out_disable_vreg: | |
7351 | ufshcd_setup_vreg(hba, false); | |
c6e79dac SRT |
7352 | out_disable_clks: |
7353 | ufshcd_setup_clocks(hba, false); | |
6a771a65 RS |
7354 | out_disable_hba_vreg: |
7355 | ufshcd_setup_hba_vreg(hba, false); | |
aa497613 SRT |
7356 | out: |
7357 | return err; | |
7358 | } | |
7359 | ||
7360 | static void ufshcd_hba_exit(struct ufs_hba *hba) | |
7361 | { | |
1d337ec2 SRT |
7362 | if (hba->is_powered) { |
7363 | ufshcd_variant_hba_exit(hba); | |
7364 | ufshcd_setup_vreg(hba, false); | |
a508253d | 7365 | ufshcd_suspend_clkscaling(hba); |
eebcc196 | 7366 | if (ufshcd_is_clkscaling_supported(hba)) |
0701e49d SJ |
7367 | if (hba->devfreq) |
7368 | ufshcd_suspend_clkscaling(hba); | |
1d337ec2 SRT |
7369 | ufshcd_setup_clocks(hba, false); |
7370 | ufshcd_setup_hba_vreg(hba, false); | |
7371 | hba->is_powered = false; | |
7372 | } | |
aa497613 SRT |
7373 | } |
7374 | ||
57d104c1 SJ |
7375 | static int |
7376 | ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) | |
7377 | { | |
7378 | unsigned char cmd[6] = {REQUEST_SENSE, | |
7379 | 0, | |
7380 | 0, | |
7381 | 0, | |
dcea0bfb | 7382 | UFSHCD_REQ_SENSE_SIZE, |
57d104c1 SJ |
7383 | 0}; |
7384 | char *buffer; | |
7385 | int ret; | |
7386 | ||
dcea0bfb | 7387 | buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL); |
57d104c1 SJ |
7388 | if (!buffer) { |
7389 | ret = -ENOMEM; | |
7390 | goto out; | |
7391 | } | |
7392 | ||
fcbfffe2 CH |
7393 | ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer, |
7394 | UFSHCD_REQ_SENSE_SIZE, NULL, NULL, | |
7395 | msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL); | |
57d104c1 SJ |
7396 | if (ret) |
7397 | pr_err("%s: failed with err %d\n", __func__, ret); | |
7398 | ||
7399 | kfree(buffer); | |
7400 | out: | |
7401 | return ret; | |
7402 | } | |
7403 | ||
7404 | /** | |
7405 | * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device | |
7406 | * power mode | |
7407 | * @hba: per adapter instance | |
7408 | * @pwr_mode: device power mode to set | |
7409 | * | |
7410 | * Returns 0 if requested power mode is set successfully | |
7411 | * Returns non-zero if failed to set the requested power mode | |
7412 | */ | |
7413 | static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | |
7414 | enum ufs_dev_pwr_mode pwr_mode) | |
7415 | { | |
7416 | unsigned char cmd[6] = { START_STOP }; | |
7417 | struct scsi_sense_hdr sshdr; | |
7c48bfd0 AM |
7418 | struct scsi_device *sdp; |
7419 | unsigned long flags; | |
57d104c1 SJ |
7420 | int ret; |
7421 | ||
7c48bfd0 AM |
7422 | spin_lock_irqsave(hba->host->host_lock, flags); |
7423 | sdp = hba->sdev_ufs_device; | |
7424 | if (sdp) { | |
7425 | ret = scsi_device_get(sdp); | |
7426 | if (!ret && !scsi_device_online(sdp)) { | |
7427 | ret = -ENODEV; | |
7428 | scsi_device_put(sdp); | |
7429 | } | |
7430 | } else { | |
7431 | ret = -ENODEV; | |
7432 | } | |
7433 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
7434 | ||
7435 | if (ret) | |
7436 | return ret; | |
57d104c1 SJ |
7437 | |
7438 | /* | |
7439 | * If scsi commands fail, the scsi mid-layer schedules scsi error- | |
7440 | * handling, which would wait for host to be resumed. Since we know | |
7441 | * we are functional while we are here, skip host resume in error | |
7442 | * handling context. | |
7443 | */ | |
7444 | hba->host->eh_noresume = 1; | |
7445 | if (hba->wlun_dev_clr_ua) { | |
7446 | ret = ufshcd_send_request_sense(hba, sdp); | |
7447 | if (ret) | |
7448 | goto out; | |
7449 | /* Unit attention condition is cleared now */ | |
7450 | hba->wlun_dev_clr_ua = false; | |
7451 | } | |
7452 | ||
7453 | cmd[4] = pwr_mode << 4; | |
7454 | ||
7455 | /* | |
7456 | * This function is generally called from the power management
e8064021 | 7457 | * callbacks, hence set the RQF_PM flag so that it doesn't resume the
57d104c1 SJ |
7458 | * already suspended children.
7459 | */ | |
fcbfffe2 CH |
7460 | ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, |
7461 | START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); | |
57d104c1 SJ |
7462 | if (ret) { |
7463 | sdev_printk(KERN_WARNING, sdp, | |
ef61329d HR |
7464 | "START_STOP failed for power mode: %d, result %x\n", |
7465 | pwr_mode, ret); | |
c65be1a6 | 7466 | if (driver_byte(ret) == DRIVER_SENSE) |
21045519 | 7467 | scsi_print_sense_hdr(sdp, NULL, &sshdr); |
57d104c1 SJ |
7468 | } |
7469 | ||
7470 | if (!ret) | |
7471 | hba->curr_dev_pwr_mode = pwr_mode; | |
7472 | out: | |
7c48bfd0 | 7473 | scsi_device_put(sdp); |
57d104c1 SJ |
7474 | hba->host->eh_noresume = 0; |
7475 | return ret; | |
7476 | } | |
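/*
 * Illustrative note, not part of the driver: the requested power mode is
 * carried in the POWER CONDITION field, i.e. the upper nibble of CDB byte 4
 * ("cmd[4] = pwr_mode << 4" above), using the enum ufs_dev_pwr_mode values
 * UFS_ACTIVE_PWR_MODE (1), UFS_SLEEP_PWR_MODE (2) and
 * UFS_POWERDOWN_PWR_MODE (3). A typical internal call looks like:
 *
 *	ret = ufshcd_set_dev_pwr_mode(hba, UFS_SLEEP_PWR_MODE);
 *	if (!ret)
 *		dev_dbg(hba->dev, "dev pwr mode now %d\n",
 *			hba->curr_dev_pwr_mode);
 */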
7477 | ||
7478 | static int ufshcd_link_state_transition(struct ufs_hba *hba, | |
7479 | enum uic_link_state req_link_state, | |
7480 | int check_for_bkops) | |
7481 | { | |
7482 | int ret = 0; | |
7483 | ||
7484 | if (req_link_state == hba->uic_link_state) | |
7485 | return 0; | |
7486 | ||
7487 | if (req_link_state == UIC_LINK_HIBERN8_STATE) { | |
7488 | ret = ufshcd_uic_hibern8_enter(hba); | |
7489 | if (!ret) | |
7490 | ufshcd_set_link_hibern8(hba); | |
7491 | else | |
7492 | goto out; | |
7493 | } | |
7494 | /* | |
7495 | * If autobkops is enabled, link can't be turned off because | |
7496 | * turning off the link would also turn off the device. | |
7497 | */ | |
7498 | else if ((req_link_state == UIC_LINK_OFF_STATE) && | |
7499 | (!check_for_bkops || (check_for_bkops && | |
7500 | !hba->auto_bkops_enabled))) { | |
f3099fbd YG |
7501 | /* |
7502 | * Let's make sure that the link is in low power mode; we currently
7503 | * do this by putting the link in Hibern8. Another way to put the
7504 | * link in low power mode is to send a DME end point reset to the
7505 | * device and then send the DME reset command to the local
7506 | * UniPro. But putting the link in Hibern8 is much faster.
7507 | */ | |
7508 | ret = ufshcd_uic_hibern8_enter(hba); | |
7509 | if (ret) | |
7510 | goto out; | |
57d104c1 SJ |
7511 | /* |
7512 | * Change controller state to "reset state" which | |
7513 | * should also put the link in off/reset state | |
7514 | */ | |
596585a2 | 7515 | ufshcd_hba_stop(hba, true); |
57d104c1 SJ |
7516 | /* |
7517 | * TODO: Check if we need any delay to make sure that | |
7518 | * controller is reset | |
7519 | */ | |
7520 | ufshcd_set_link_off(hba); | |
7521 | } | |
7522 | ||
7523 | out: | |
7524 | return ret; | |
7525 | } | |
7526 | ||
7527 | static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) | |
7528 | { | |
b799fdf7 YG |
7529 | /* |
7530 | * It seems some UFS devices may keep drawing more than sleep current | |
7531 | * (at least for 500us) from UFS rails (especially from VCCQ rail).
7532 | * To avoid this situation, add 2ms delay before putting these UFS | |
7533 | * rails in LPM mode. | |
7534 | */ | |
7535 | if (!ufshcd_is_link_active(hba) && | |
7536 | hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) | |
7537 | usleep_range(2000, 2100); | |
7538 | ||
57d104c1 SJ |
7539 | /* |
7540 | * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7541 | * save some power.
7542 | * | |
7543 | * If the UFS device and link are in the OFF state, all power supplies (VCC,
7544 | * VCCQ, VCCQ2) can be turned off if power on write protect is not | |
7545 | * required. If UFS link is inactive (Hibern8 or OFF state) and device | |
7546 | * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. | |
7547 | * | |
7548 | * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway | |
7549 | * in low power state which would save some power. | |
7550 | */ | |
7551 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | |
7552 | !hba->dev_info.is_lu_power_on_wp) { | |
7553 | ufshcd_setup_vreg(hba, false); | |
7554 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | |
7555 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | |
7556 | if (!ufshcd_is_link_active(hba)) { | |
7557 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | |
7558 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); | |
7559 | } | |
7560 | } | |
7561 | } | |
7562 | ||
7563 | static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) | |
7564 | { | |
7565 | int ret = 0; | |
7566 | ||
7567 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | |
7568 | !hba->dev_info.is_lu_power_on_wp) { | |
7569 | ret = ufshcd_setup_vreg(hba, true); | |
7570 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | |
57d104c1 SJ |
7571 | if (!ret && !ufshcd_is_link_active(hba)) { |
7572 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); | |
7573 | if (ret) | |
7574 | goto vcc_disable; | |
7575 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); | |
7576 | if (ret) | |
7577 | goto vccq_lpm; | |
7578 | } | |
69d72ac8 | 7579 | ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); |
57d104c1 SJ |
7580 | } |
7581 | goto out; | |
7582 | ||
7583 | vccq_lpm: | |
7584 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | |
7585 | vcc_disable: | |
7586 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | |
7587 | out: | |
7588 | return ret; | |
7589 | } | |
7590 | ||
7591 | static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) | |
7592 | { | |
7593 | if (ufshcd_is_link_off(hba)) | |
7594 | ufshcd_setup_hba_vreg(hba, false); | |
7595 | } | |
7596 | ||
7597 | static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) | |
7598 | { | |
7599 | if (ufshcd_is_link_off(hba)) | |
7600 | ufshcd_setup_hba_vreg(hba, true); | |
7601 | } | |
7602 | ||
7a3e97b0 | 7603 | /** |
57d104c1 | 7604 | * ufshcd_suspend - helper function for suspend operations |
3b1d0580 | 7605 | * @hba: per adapter instance |
57d104c1 SJ |
7606 | * @pm_op: desired low power operation type |
7607 | * | |
7608 | * This function will try to put the UFS device and link into low power | |
7609 | * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" | |
7610 | * (System PM level). | |
7611 | * | |
7612 | * If this function is called during shutdown, it will make sure that | |
7613 | * both the UFS device and UFS link are powered off.
7a3e97b0 | 7614 | * |
57d104c1 SJ |
7615 | * NOTE: UFS device & link must be active before we enter in this function. |
7616 | * | |
7617 | * Returns 0 for success and non-zero for failure | |
7a3e97b0 | 7618 | */ |
57d104c1 | 7619 | static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
7a3e97b0 | 7620 | { |
57d104c1 SJ |
7621 | int ret = 0; |
7622 | enum ufs_pm_level pm_lvl; | |
7623 | enum ufs_dev_pwr_mode req_dev_pwr_mode; | |
7624 | enum uic_link_state req_link_state; | |
7625 | ||
7626 | hba->pm_op_in_progress = 1; | |
7627 | if (!ufshcd_is_shutdown_pm(pm_op)) { | |
7628 | pm_lvl = ufshcd_is_runtime_pm(pm_op) ? | |
7629 | hba->rpm_lvl : hba->spm_lvl; | |
7630 | req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); | |
7631 | req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); | |
7632 | } else { | |
7633 | req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; | |
7634 | req_link_state = UIC_LINK_OFF_STATE; | |
7635 | } | |
7636 | ||
7a3e97b0 | 7637 | /* |
57d104c1 SJ |
7638 | * If we can't transition into any of the low power modes |
7639 | * just gate the clocks. | |
7a3e97b0 | 7640 | */ |
1ab27c9c ST |
7641 | ufshcd_hold(hba, false); |
7642 | hba->clk_gating.is_suspended = true; | |
7643 | ||
401f1e44 SJ |
7644 | if (hba->clk_scaling.is_allowed) { |
7645 | cancel_work_sync(&hba->clk_scaling.suspend_work); | |
7646 | cancel_work_sync(&hba->clk_scaling.resume_work); | |
7647 | ufshcd_suspend_clkscaling(hba); | |
7648 | } | |
d6fcf81a | 7649 | |
57d104c1 SJ |
7650 | if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && |
7651 | req_link_state == UIC_LINK_ACTIVE_STATE) { | |
7652 | goto disable_clks; | |
7653 | } | |
7a3e97b0 | 7654 | |
57d104c1 SJ |
7655 | if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && |
7656 | (req_link_state == hba->uic_link_state)) | |
d6fcf81a | 7657 | goto enable_gating; |
57d104c1 SJ |
7658 | |
7659 | /* UFS device & link must be active before we enter in this function */ | |
7660 | if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { | |
7661 | ret = -EINVAL; | |
d6fcf81a | 7662 | goto enable_gating; |
57d104c1 SJ |
7663 | } |
7664 | ||
7665 | if (ufshcd_is_runtime_pm(pm_op)) { | |
374a246e SJ |
7666 | if (ufshcd_can_autobkops_during_suspend(hba)) { |
7667 | /* | |
7668 | * The device is idle with no requests in the queue, | |
7669 | * allow background operations if bkops status shows | |
7670 | * that performance might be impacted. | |
7671 | */ | |
7672 | ret = ufshcd_urgent_bkops(hba); | |
7673 | if (ret) | |
7674 | goto enable_gating; | |
7675 | } else { | |
7676 | /* make sure that auto bkops is disabled */ | |
7677 | ufshcd_disable_auto_bkops(hba); | |
7678 | } | |
57d104c1 SJ |
7679 | } |
7680 | ||
7681 | if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && | |
7682 | ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || | |
7683 | !ufshcd_is_runtime_pm(pm_op))) { | |
7684 | /* ensure that bkops is disabled */ | |
7685 | ufshcd_disable_auto_bkops(hba); | |
7686 | ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); | |
7687 | if (ret) | |
1ab27c9c | 7688 | goto enable_gating; |
57d104c1 SJ |
7689 | } |
7690 | ||
7691 | ret = ufshcd_link_state_transition(hba, req_link_state, 1); | |
7692 | if (ret) | |
7693 | goto set_dev_active; | |
7694 | ||
7695 | ufshcd_vreg_set_lpm(hba); | |
7696 | ||
7697 | disable_clks: | |
7698 | /* | |
7699 | * Call vendor specific suspend callback. As these callbacks may access | |
7700 | * vendor specific host controller register space, call them while the
7701 | * host clocks are still ON.
7702 | */ | |
0263bcd0 YG |
7703 | ret = ufshcd_vops_suspend(hba, pm_op); |
7704 | if (ret) | |
7705 | goto set_link_active; | |
57d104c1 | 7706 | |
57d104c1 SJ |
7707 | if (!ufshcd_is_link_active(hba)) |
7708 | ufshcd_setup_clocks(hba, false); | |
7709 | else | |
7710 | /* If link is active, device ref_clk can't be switched off */ | |
7711 | __ufshcd_setup_clocks(hba, false, true); | |
7712 | ||
1ab27c9c | 7713 | hba->clk_gating.state = CLKS_OFF; |
7ff5ab47 | 7714 | trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); |
57d104c1 SJ |
7715 | /* |
7716 | * Disable the host irq as there won't be any
0263bcd0 | 7717 | * host controller transaction expected till resume. |
57d104c1 SJ |
7718 | */ |
7719 | ufshcd_disable_irq(hba); | |
7720 | /* Put the host controller in low power mode if possible */ | |
7721 | ufshcd_hba_vreg_set_lpm(hba); | |
7722 | goto out; | |
7723 | ||
57d104c1 | 7724 | set_link_active: |
401f1e44 SJ |
7725 | if (hba->clk_scaling.is_allowed) |
7726 | ufshcd_resume_clkscaling(hba); | |
57d104c1 SJ |
7727 | ufshcd_vreg_set_hpm(hba); |
7728 | if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) | |
7729 | ufshcd_set_link_active(hba); | |
7730 | else if (ufshcd_is_link_off(hba)) | |
7731 | ufshcd_host_reset_and_restore(hba); | |
7732 | set_dev_active: | |
7733 | if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) | |
7734 | ufshcd_disable_auto_bkops(hba); | |
1ab27c9c | 7735 | enable_gating: |
401f1e44 SJ |
7736 | if (hba->clk_scaling.is_allowed) |
7737 | ufshcd_resume_clkscaling(hba); | |
1ab27c9c ST |
7738 | hba->clk_gating.is_suspended = false; |
7739 | ufshcd_release(hba); | |
57d104c1 SJ |
7740 | out: |
7741 | hba->pm_op_in_progress = 0; | |
7742 | return ret; | |
7a3e97b0 SY |
7743 | } |
7744 | ||
7745 | /** | |
57d104c1 | 7746 | * ufshcd_resume - helper function for resume operations |
3b1d0580 | 7747 | * @hba: per adapter instance |
57d104c1 | 7748 | * @pm_op: runtime PM or system PM |
7a3e97b0 | 7749 | * |
57d104c1 SJ |
7750 | * This function basically brings the UFS device, UniPro link and controller |
7751 | * to active state. | |
7752 | * | |
7753 | * Returns 0 for success and non-zero for failure | |
7a3e97b0 | 7754 | */ |
57d104c1 | 7755 | static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
7a3e97b0 | 7756 | { |
57d104c1 SJ |
7757 | int ret; |
7758 | enum uic_link_state old_link_state; | |
7759 | ||
7760 | hba->pm_op_in_progress = 1; | |
7761 | old_link_state = hba->uic_link_state; | |
7762 | ||
7763 | ufshcd_hba_vreg_set_hpm(hba); | |
7764 | /* Make sure clocks are enabled before accessing controller */ | |
7765 | ret = ufshcd_setup_clocks(hba, true); | |
7766 | if (ret) | |
7767 | goto out; | |
7768 | ||
57d104c1 SJ |
7769 | /* enable the host irq as host controller would be active soon */ |
7770 | ret = ufshcd_enable_irq(hba); | |
7771 | if (ret) | |
7772 | goto disable_irq_and_vops_clks; | |
7773 | ||
7774 | ret = ufshcd_vreg_set_hpm(hba); | |
7775 | if (ret) | |
7776 | goto disable_irq_and_vops_clks; | |
7777 | ||
7a3e97b0 | 7778 | /* |
57d104c1 SJ |
7779 | * Call vendor specific resume callback. As these callbacks may access |
7780 | * vendor specific host controller register space call them when the | |
7781 | * host clocks are ON. | |
7a3e97b0 | 7782 | */ |
0263bcd0 YG |
7783 | ret = ufshcd_vops_resume(hba, pm_op); |
7784 | if (ret) | |
7785 | goto disable_vreg; | |
57d104c1 SJ |
7786 | |
7787 | if (ufshcd_is_link_hibern8(hba)) { | |
7788 | ret = ufshcd_uic_hibern8_exit(hba); | |
7789 | if (!ret) | |
7790 | ufshcd_set_link_active(hba); | |
7791 | else | |
7792 | goto vendor_suspend; | |
7793 | } else if (ufshcd_is_link_off(hba)) { | |
7794 | ret = ufshcd_host_reset_and_restore(hba); | |
7795 | /* | |
7796 | * ufshcd_host_reset_and_restore() should have already | |
7797 | * set the link state as active | |
7798 | */ | |
7799 | if (ret || !ufshcd_is_link_active(hba)) | |
7800 | goto vendor_suspend; | |
7801 | } | |
7802 | ||
7803 | if (!ufshcd_is_ufs_dev_active(hba)) { | |
7804 | ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); | |
7805 | if (ret) | |
7806 | goto set_old_link_state; | |
7807 | } | |
7808 | ||
4e768e76 SJ |
7809 | if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) |
7810 | ufshcd_enable_auto_bkops(hba); | |
7811 | else | |
7812 | /* | |
7813 | * If BKOPs operations are urgently needed at this moment then | |
7814 | * keep auto-bkops enabled or else disable it. | |
7815 | */ | |
7816 | ufshcd_urgent_bkops(hba); | |
7817 | ||
1ab27c9c ST |
7818 | hba->clk_gating.is_suspended = false; |
7819 | ||
fcb0c4b0 ST |
7820 | if (hba->clk_scaling.is_allowed) |
7821 | ufshcd_resume_clkscaling(hba); | |
856b3483 | 7822 | |
1ab27c9c ST |
7823 | /* Schedule clock gating in case of no access to UFS device yet */ |
7824 | ufshcd_release(hba); | |
ad448378 AH |
7825 | |
7826 | /* Enable Auto-Hibernate if configured */ | |
7827 | ufshcd_auto_hibern8_enable(hba); | |
7828 | ||
57d104c1 SJ |
7829 | goto out; |
7830 | ||
7831 | set_old_link_state: | |
7832 | ufshcd_link_state_transition(hba, old_link_state, 0); | |
7833 | vendor_suspend: | |
0263bcd0 | 7834 | ufshcd_vops_suspend(hba, pm_op); |
57d104c1 SJ |
7835 | disable_vreg: |
7836 | ufshcd_vreg_set_lpm(hba); | |
7837 | disable_irq_and_vops_clks: | |
7838 | ufshcd_disable_irq(hba); | |
401f1e44 SJ |
7839 | if (hba->clk_scaling.is_allowed) |
7840 | ufshcd_suspend_clkscaling(hba); | |
57d104c1 SJ |
7841 | ufshcd_setup_clocks(hba, false); |
7842 | out: | |
7843 | hba->pm_op_in_progress = 0; | |
7844 | return ret; | |
7845 | } | |
7846 | ||
7847 | /** | |
7848 | * ufshcd_system_suspend - system suspend routine | |
7849 | * @hba: per adapter instance | |
57d104c1 SJ |
7850 | * |
7851 | * Check the description of ufshcd_suspend() function for more details. | |
7852 | * | |
7853 | * Returns 0 for success and non-zero for failure | |
7854 | */ | |
7855 | int ufshcd_system_suspend(struct ufs_hba *hba) | |
7856 | { | |
7857 | int ret = 0; | |
7ff5ab47 | 7858 | ktime_t start = ktime_get(); |
57d104c1 SJ |
7859 | |
7860 | if (!hba || !hba->is_powered) | |
233b594b | 7861 | return 0; |
57d104c1 | 7862 | |
0b257734 SJ |
7863 | if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == |
7864 | hba->curr_dev_pwr_mode) && | |
7865 | (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) == | |
7866 | hba->uic_link_state)) | |
7867 | goto out; | |
57d104c1 | 7868 | |
0b257734 | 7869 | if (pm_runtime_suspended(hba->dev)) { |
57d104c1 SJ |
7870 | /* |
7871 | * UFS device and/or UFS link low power states during runtime | |
7872 | * suspend seem to be different from what is expected during
7873 | * system suspend. Hence runtime resume the device & link and
7874 | * let the system suspend low power states take effect.
7875 | * TODO: If resume takes a long time, we might optimize it in
7876 | * the future by not resuming everything if possible.
7877 | */ | |
7878 | ret = ufshcd_runtime_resume(hba); | |
7879 | if (ret) | |
7880 | goto out; | |
7881 | } | |
7882 | ||
7883 | ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); | |
7884 | out: | |
7ff5ab47 SJ |
7885 | trace_ufshcd_system_suspend(dev_name(hba->dev), ret, |
7886 | ktime_to_us(ktime_sub(ktime_get(), start)), | |
73eba2be | 7887 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
e785060e DR |
7888 | if (!ret) |
7889 | hba->is_sys_suspended = true; | |
57d104c1 SJ |
7890 | return ret; |
7891 | } | |
7892 | EXPORT_SYMBOL(ufshcd_system_suspend); | |
7893 | ||
7894 | /** | |
7895 | * ufshcd_system_resume - system resume routine | |
7896 | * @hba: per adapter instance | |
7897 | * | |
7898 | * Returns 0 for success and non-zero for failure | |
7899 | */ | |
7a3e97b0 | 7900 | |
57d104c1 SJ |
7901 | int ufshcd_system_resume(struct ufs_hba *hba) |
7902 | { | |
7ff5ab47 SJ |
7903 | int ret = 0; |
7904 | ktime_t start = ktime_get(); | |
7905 | ||
e3ce73d6 YG |
7906 | if (!hba) |
7907 | return -EINVAL; | |
7908 | ||
7909 | if (!hba->is_powered || pm_runtime_suspended(hba->dev)) | |
57d104c1 SJ |
7910 | /* |
7911 | * Let the runtime resume take care of resuming | |
7912 | * if runtime suspended. | |
7913 | */ | |
7ff5ab47 SJ |
7914 | goto out; |
7915 | else | |
7916 | ret = ufshcd_resume(hba, UFS_SYSTEM_PM); | |
7917 | out: | |
7918 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, | |
7919 | ktime_to_us(ktime_sub(ktime_get(), start)), | |
73eba2be | 7920 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
7ff5ab47 | 7921 | return ret; |
7a3e97b0 | 7922 | } |
57d104c1 | 7923 | EXPORT_SYMBOL(ufshcd_system_resume); |
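/*
 * A minimal sketch (not part of this file) of how bus glue drivers such as
 * ufshcd-pltfrm or ufshcd-pci forward the system sleep callbacks to the two
 * helpers above; the "example_*" names are made up:
 */
#if 0	/* illustration only */
static int example_ufs_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_system_suspend(hba);
}

static int example_ufs_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_system_resume(hba);
}

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_ufs_suspend, example_ufs_resume)
};
#endif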
3b1d0580 | 7924 | |
57d104c1 SJ |
7925 | /** |
7926 | * ufshcd_runtime_suspend - runtime suspend routine | |
7927 | * @hba: per adapter instance | |
7928 | * | |
7929 | * Check the description of ufshcd_suspend() function for more details. | |
7930 | * | |
7931 | * Returns 0 for success and non-zero for failure | |
7932 | */ | |
66ec6d59 SRT |
7933 | int ufshcd_runtime_suspend(struct ufs_hba *hba) |
7934 | { | |
7ff5ab47 SJ |
7935 | int ret = 0; |
7936 | ktime_t start = ktime_get(); | |
7937 | ||
e3ce73d6 YG |
7938 | if (!hba) |
7939 | return -EINVAL; | |
7940 | ||
7941 | if (!hba->is_powered) | |
7ff5ab47 SJ |
7942 | goto out; |
7943 | else | |
7944 | ret = ufshcd_suspend(hba, UFS_RUNTIME_PM); | |
7945 | out: | |
7946 | trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, | |
7947 | ktime_to_us(ktime_sub(ktime_get(), start)), | |
73eba2be | 7948 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
7ff5ab47 | 7949 | return ret; |
66ec6d59 SRT |
7950 | } |
7951 | EXPORT_SYMBOL(ufshcd_runtime_suspend); | |
7952 | ||
57d104c1 SJ |
7953 | /** |
7954 | * ufshcd_runtime_resume - runtime resume routine | |
7955 | * @hba: per adapter instance | |
7956 | * | |
7957 | * This function basically brings the UFS device, UniPro link and controller | |
7958 | * to active state. Following operations are done in this function: | |
7959 | * | |
7960 | * 1. Turn on all the controller related clocks | |
7961 | * 2. Bring the UniPro link out of Hibernate state | |
7962 | * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device | |
7963 | * to active state. | |
7964 | * 4. If auto-bkops is enabled on the device, disable it. | |
7965 | * | |
7966 | * So the following would be the possible power state after this
7967 | * function returns successfully:
7968 | * S1: UFS device in Active state with VCC rail ON | |
7969 | * UniPro link in Active state | |
7970 | * All the UFS/UniPro controller clocks are ON | |
7971 | * | |
7972 | * Returns 0 for success and non-zero for failure | |
7973 | */ | |
66ec6d59 SRT |
7974 | int ufshcd_runtime_resume(struct ufs_hba *hba) |
7975 | { | |
7ff5ab47 SJ |
7976 | int ret = 0; |
7977 | ktime_t start = ktime_get(); | |
7978 | ||
e3ce73d6 YG |
7979 | if (!hba) |
7980 | return -EINVAL; | |
7981 | ||
7982 | if (!hba->is_powered) | |
7ff5ab47 SJ |
7983 | goto out; |
7984 | else | |
7985 | ret = ufshcd_resume(hba, UFS_RUNTIME_PM); | |
7986 | out: | |
7987 | trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, | |
7988 | ktime_to_us(ktime_sub(ktime_get(), start)), | |
73eba2be | 7989 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
7ff5ab47 | 7990 | return ret; |
66ec6d59 SRT |
7991 | } |
7992 | EXPORT_SYMBOL(ufshcd_runtime_resume); | |
7993 | ||
7994 | int ufshcd_runtime_idle(struct ufs_hba *hba) | |
7995 | { | |
7996 | return 0; | |
7997 | } | |
7998 | EXPORT_SYMBOL(ufshcd_runtime_idle); | |
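/*
 * A minimal sketch (not part of this file) of how the runtime PM helpers
 * above are wired up by a glue driver; in practice these entries live in
 * the same dev_pm_ops as the system sleep hooks sketched earlier. The
 * "example_*" names are made up:
 */
#if 0	/* illustration only */
static int example_ufs_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int example_ufs_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int example_ufs_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}

static const struct dev_pm_ops example_ufs_rpm_ops = {
	SET_RUNTIME_PM_OPS(example_ufs_runtime_suspend,
			   example_ufs_runtime_resume,
			   example_ufs_runtime_idle)
};
#endif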
7999 | ||
57d104c1 SJ |
8000 | /** |
8001 | * ufshcd_shutdown - shutdown routine | |
8002 | * @hba: per adapter instance | |
8003 | * | |
8004 | * This function would power off both UFS device and UFS link. | |
8005 | * | |
8006 | * Returns 0 always to allow force shutdown even in case of errors. | |
8007 | */ | |
8008 | int ufshcd_shutdown(struct ufs_hba *hba) | |
8009 | { | |
8010 | int ret = 0; | |
8011 | ||
8012 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) | |
8013 | goto out; | |
8014 | ||
8015 | if (pm_runtime_suspended(hba->dev)) { | |
8016 | ret = ufshcd_runtime_resume(hba); | |
8017 | if (ret) | |
8018 | goto out; | |
8019 | } | |
8020 | ||
8021 | ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); | |
8022 | out: | |
8023 | if (ret) | |
8024 | dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); | |
8025 | /* allow force shutdown even in case of errors */ | |
8026 | return 0; | |
8027 | } | |
8028 | EXPORT_SYMBOL(ufshcd_shutdown); | |
8029 | ||
7a3e97b0 | 8030 | /** |
3b1d0580 | 8031 | * ufshcd_remove - de-allocate SCSI host and host memory space |
7a3e97b0 | 8032 | * data structure memory |
8aa29f19 | 8033 | * @hba: per adapter instance |
7a3e97b0 | 8034 | */ |
3b1d0580 | 8035 | void ufshcd_remove(struct ufs_hba *hba) |
7a3e97b0 | 8036 | { |
df032bf2 | 8037 | ufs_bsg_remove(hba); |
cbb6813e | 8038 | ufs_sysfs_remove_nodes(hba->dev); |
cfdf9c91 | 8039 | scsi_remove_host(hba->host); |
7a3e97b0 | 8040 | /* disable interrupts */ |
2fbd009b | 8041 | ufshcd_disable_intr(hba, hba->intr_mask); |
596585a2 | 8042 | ufshcd_hba_stop(hba, true); |
7a3e97b0 | 8043 | |
eebcc196 | 8044 | ufshcd_exit_clk_scaling(hba); |
1ab27c9c | 8045 | ufshcd_exit_clk_gating(hba); |
fcb0c4b0 ST |
8046 | if (ufshcd_is_clkscaling_supported(hba)) |
8047 | device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); | |
aa497613 | 8048 | ufshcd_hba_exit(hba); |
3b1d0580 VH |
8049 | } |
8050 | EXPORT_SYMBOL_GPL(ufshcd_remove); | |
8051 | ||
47555a5c YG |
8052 | /** |
8053 | * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) | |
8054 | * @hba: pointer to Host Bus Adapter (HBA) | |
8055 | */ | |
8056 | void ufshcd_dealloc_host(struct ufs_hba *hba) | |
8057 | { | |
8058 | scsi_host_put(hba->host); | |
8059 | } | |
8060 | EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); | |
8061 | ||
ca3d7bf9 AM |
8062 | /** |
8063 | * ufshcd_set_dma_mask - Set dma mask based on the controller | |
8064 | * addressing capability | |
8065 | * @hba: per adapter instance | |
8066 | * | |
8067 | * Returns 0 for success, non-zero for failure | |
8068 | */ | |
8069 | static int ufshcd_set_dma_mask(struct ufs_hba *hba) | |
8070 | { | |
8071 | if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { | |
8072 | if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) | |
8073 | return 0; | |
8074 | } | |
8075 | return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); | |
8076 | } | |
8077 | ||
7a3e97b0 | 8078 | /** |
5c0c28a8 | 8079 | * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) |
3b1d0580 VH |
8080 | * @dev: pointer to device handle |
8081 | * @hba_handle: driver private handle | |
7a3e97b0 SY |
8082 | * Returns 0 on success, non-zero value on failure |
8083 | */ | |
5c0c28a8 | 8084 | int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) |
7a3e97b0 SY |
8085 | { |
8086 | struct Scsi_Host *host; | |
8087 | struct ufs_hba *hba; | |
5c0c28a8 | 8088 | int err = 0; |
7a3e97b0 | 8089 | |
3b1d0580 VH |
8090 | if (!dev) { |
8091 | dev_err(dev, | |
8092 | "Invalid memory reference for dev is NULL\n"); | |
8093 | err = -ENODEV; | |
7a3e97b0 SY |
8094 | goto out_error; |
8095 | } | |
8096 | ||
7a3e97b0 SY |
8097 | host = scsi_host_alloc(&ufshcd_driver_template, |
8098 | sizeof(struct ufs_hba)); | |
8099 | if (!host) { | |
3b1d0580 | 8100 | dev_err(dev, "scsi_host_alloc failed\n"); |
7a3e97b0 | 8101 | err = -ENOMEM; |
3b1d0580 | 8102 | goto out_error; |
7a3e97b0 SY |
8103 | } |
8104 | hba = shost_priv(host); | |
7a3e97b0 | 8105 | hba->host = host; |
3b1d0580 | 8106 | hba->dev = dev; |
5c0c28a8 SRT |
8107 | *hba_handle = hba; |
8108 | ||
566ec9ad SM |
8109 | INIT_LIST_HEAD(&hba->clk_list_head); |
8110 | ||
5c0c28a8 SRT |
8111 | out_error: |
8112 | return err; | |
8113 | } | |
8114 | EXPORT_SYMBOL(ufshcd_alloc_host); | |
8115 | ||
8116 | /** | |
8117 | * ufshcd_init - Driver initialization routine | |
8118 | * @hba: per-adapter instance | |
8119 | * @mmio_base: base register address | |
8120 | * @irq: Interrupt line of device | |
8121 | * Returns 0 on success, non-zero value on failure | |
8122 | */ | |
8123 | int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | |
8124 | { | |
8125 | int err; | |
8126 | struct Scsi_Host *host = hba->host; | |
8127 | struct device *dev = hba->dev; | |
8128 | ||
8129 | if (!mmio_base) { | |
8130 | dev_err(hba->dev, | |
8131 | "Invalid memory reference for mmio_base is NULL\n"); | |
8132 | err = -ENODEV; | |
8133 | goto out_error; | |
8134 | } | |
8135 | ||
3b1d0580 VH |
8136 | hba->mmio_base = mmio_base; |
8137 | hba->irq = irq; | |
7a3e97b0 | 8138 | |
a4b0e8a4 PM |
8139 | /* Set descriptor lengths to specification defaults */ |
8140 | ufshcd_def_desc_sizes(hba); | |
8141 | ||
aa497613 | 8142 | err = ufshcd_hba_init(hba); |
5c0c28a8 SRT |
8143 | if (err) |
8144 | goto out_error; | |
8145 | ||
7a3e97b0 SY |
8146 | /* Read capabilities registers */ |
8147 | ufshcd_hba_capabilities(hba); | |
8148 | ||
8149 | /* Get UFS version supported by the controller */ | |
8150 | hba->ufs_version = ufshcd_get_ufs_version(hba); | |
8151 | ||
c01848c6 YG |
8152 | if ((hba->ufs_version != UFSHCI_VERSION_10) && |
8153 | (hba->ufs_version != UFSHCI_VERSION_11) && | |
8154 | (hba->ufs_version != UFSHCI_VERSION_20) && | |
8155 | (hba->ufs_version != UFSHCI_VERSION_21)) | |
8156 | dev_err(hba->dev, "invalid UFS version 0x%x\n", | |
8157 | hba->ufs_version); | |
8158 | ||
2fbd009b SJ |
8159 | /* Get Interrupt bit mask per version */ |
8160 | hba->intr_mask = ufshcd_get_intr_mask(hba); | |
8161 | ||
ca3d7bf9 AM |
8162 | err = ufshcd_set_dma_mask(hba); |
8163 | if (err) { | |
8164 | dev_err(hba->dev, "set dma mask failed\n"); | |
8165 | goto out_disable; | |
8166 | } | |
8167 | ||
7a3e97b0 SY |
8168 | /* Allocate memory for host memory space */ |
8169 | err = ufshcd_memory_alloc(hba); | |
8170 | if (err) { | |
3b1d0580 VH |
8171 | dev_err(hba->dev, "Memory allocation failed\n"); |
8172 | goto out_disable; | |
7a3e97b0 SY |
8173 | } |
8174 | ||
8175 | /* Configure LRB */ | |
8176 | ufshcd_host_memory_configure(hba); | |
8177 | ||
8178 | host->can_queue = hba->nutrs; | |
8179 | host->cmd_per_lun = hba->nutrs; | |
8180 | host->max_id = UFSHCD_MAX_ID; | |
0ce147d4 | 8181 | host->max_lun = UFS_MAX_LUNS; |
7a3e97b0 SY |
8182 | host->max_channel = UFSHCD_MAX_CHANNEL; |
8183 | host->unique_id = host->host_no; | |
a851b2bd | 8184 | host->max_cmd_len = UFS_CDB_SIZE; |
7a3e97b0 | 8185 | |
7eb584db DR |
8186 | hba->max_pwr_info.is_valid = false; |
8187 | ||
7a3e97b0 | 8188 | /* Initialize wait queue for task management */
e2933132 SRT |
8189 | init_waitqueue_head(&hba->tm_wq); |
8190 | init_waitqueue_head(&hba->tm_tag_wq); | |
7a3e97b0 SY |
8191 | |
8192 | /* Initialize work queues */ | |
e8e7f271 | 8193 | INIT_WORK(&hba->eh_work, ufshcd_err_handler); |
66ec6d59 | 8194 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); |
7a3e97b0 | 8195 | |
6ccf44fe SJ |
8196 | /* Initialize UIC command mutex */ |
8197 | mutex_init(&hba->uic_cmd_mutex); | |
8198 | ||
5a0b0cb9 SRT |
8199 | /* Initialize mutex for device management commands */ |
8200 | mutex_init(&hba->dev_cmd.lock); | |
8201 | ||
a3cd5ec5 SJ |
8202 | init_rwsem(&hba->clk_scaling_lock); |
8203 | ||
5a0b0cb9 SRT |
8204 | /* Initialize device management tag acquire wait queue */ |
8205 | init_waitqueue_head(&hba->dev_cmd.tag_wq); | |
8206 | ||
1ab27c9c | 8207 | ufshcd_init_clk_gating(hba); |
199ef13c | 8208 | |
eebcc196 VG |
8209 | ufshcd_init_clk_scaling(hba); |
8210 | ||
199ef13c YG |
8211 | /* |
8212 | * In order to avoid any spurious interrupt immediately after | |
8213 | * registering UFS controller interrupt handler, clear any pending UFS | |
8214 | * interrupt status and disable all the UFS interrupts. | |
8215 | */ | |
8216 | ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), | |
8217 | REG_INTERRUPT_STATUS); | |
8218 | ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); | |
8219 | /* | |
8220 | * Make sure that UFS interrupts are disabled and any pending interrupt | |
8221 | * status is cleared before registering UFS interrupt handler. | |
8222 | */ | |
8223 | mb(); | |
8224 | ||
7a3e97b0 | 8225 | /* IRQ registration */ |
2953f850 | 8226 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); |
7a3e97b0 | 8227 | if (err) { |
3b1d0580 | 8228 | dev_err(hba->dev, "request irq failed\n"); |
1ab27c9c | 8229 | goto exit_gating; |
57d104c1 SJ |
8230 | } else { |
8231 | hba->is_irq_enabled = true; | |
7a3e97b0 SY |
8232 | } |
8233 | ||
3b1d0580 | 8234 | err = scsi_add_host(host, hba->dev); |
7a3e97b0 | 8235 | if (err) { |
3b1d0580 | 8236 | dev_err(hba->dev, "scsi_add_host failed\n"); |
1ab27c9c | 8237 | goto exit_gating; |
7a3e97b0 SY |
8238 | } |
8239 | ||
6ccf44fe SJ |
8240 | /* Host controller enable */ |
8241 | err = ufshcd_hba_enable(hba); | |
7a3e97b0 | 8242 | if (err) { |
6ccf44fe | 8243 | dev_err(hba->dev, "Host controller enable failed\n"); |
66cc820f | 8244 | ufshcd_print_host_regs(hba); |
6ba65588 | 8245 | ufshcd_print_host_state(hba); |
3b1d0580 | 8246 | goto out_remove_scsi_host; |
7a3e97b0 | 8247 | } |
6ccf44fe | 8248 | |
0c8f7586 SJ |
8249 | /* |
8250 | * Set the default power management level for runtime and system PM. | |
8251 | * Default power saving mode is to keep UFS link in Hibern8 state | |
8252 | * and UFS device in sleep state. | |
8253 | */ | |
8254 | hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( | |
8255 | UFS_SLEEP_PWR_MODE, | |
8256 | UIC_LINK_HIBERN8_STATE); | |
8257 | hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( | |
8258 | UFS_SLEEP_PWR_MODE, | |
8259 | UIC_LINK_HIBERN8_STATE); | |
8260 | ||
ad448378 AH |
8261 | /* Set the default auto-hibernate idle timer value to 150 ms */
8262 | if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) { | |
8263 | hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | | |
8264 | FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); | |
8265 | } | |
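	/*
	 * Note: the AHIT register packs a 10-bit timer value and a 3-bit
	 * scale field; scale 3 selects 1 ms units, so the value programmed
	 * above corresponds to the 150 ms default mentioned in the comment.
	 */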
8266 | ||
62694735 SRT |
8267 | /* Hold auto suspend until async scan completes */ |
8268 | pm_runtime_get_sync(dev); | |
38135535 | 8269 | atomic_set(&hba->scsi_block_reqs_cnt, 0); |
57d104c1 | 8270 | /* |
7caf489b SJ |
8271 | * We are assuming that the device wasn't put in sleep/power-down
8272 | * state during the boot stage, before the kernel started.
8273 | * This assumption helps avoid doing link startup twice during | |
8274 | * ufshcd_probe_hba(). | |
57d104c1 | 8275 | */ |
7caf489b | 8276 | ufshcd_set_ufs_dev_active(hba); |
57d104c1 | 8277 | |
6ccf44fe | 8278 | async_schedule(ufshcd_async_scan, hba); |
cbb6813e | 8279 | ufs_sysfs_add_nodes(hba->dev); |
6ccf44fe | 8280 | |
7a3e97b0 SY |
8281 | return 0; |
8282 | ||
3b1d0580 VH |
8283 | out_remove_scsi_host: |
8284 | scsi_remove_host(hba->host); | |
1ab27c9c | 8285 | exit_gating: |
eebcc196 | 8286 | ufshcd_exit_clk_scaling(hba); |
1ab27c9c | 8287 | ufshcd_exit_clk_gating(hba); |
3b1d0580 | 8288 | out_disable: |
57d104c1 | 8289 | hba->is_irq_enabled = false; |
aa497613 | 8290 | ufshcd_hba_exit(hba); |
3b1d0580 VH |
8291 | out_error: |
8292 | return err; | |
8293 | } | |
8294 | EXPORT_SYMBOL_GPL(ufshcd_init); | |
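/*
 * A minimal sketch (not part of this file) of how a bus glue driver's probe
 * path uses ufshcd_alloc_host() and ufshcd_init(); error handling is
 * trimmed, names are made up, and the usual platform bus headers are
 * assumed (compare ufshcd-pltfrm.c and ufshcd-pci.c for the real thing):
 */
#if 0	/* illustration only */
static int example_ufs_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *mmio_base;
	struct ufs_hba *hba;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(dev, &hba);
	if (err)
		return err;

	/* clock/regulator lists and hba->vops would be set up here */

	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		ufshcd_dealloc_host(hba);
		return err;
	}

	platform_set_drvdata(pdev, hba);	/* used by the PM sketches above */
	return 0;
}
#endif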
8295 | ||
3b1d0580 VH |
8296 | MODULE_AUTHOR("Santosh Yaragnavi <[email protected]>"); |
8297 | MODULE_AUTHOR("Vinayak Holikatti <[email protected]>"); | |
e0eca63e | 8298 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); |
7a3e97b0 SY |
8299 | MODULE_LICENSE("GPL"); |
8300 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); |