// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992  Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>

#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
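/*
 * Example (illustrative only, not a recommended setting): sdebug_opts is a
 * plain OR of the SDEBUG_OPT_* bits above, so loading the driver with
 *
 *	modprobe scsi_debug opts=0x9
 *
 * requests command logging (SDEBUG_OPT_NOISE, 0x1) plus injection of
 * recovered errors (SDEBUG_OPT_RECOVERED_ERR, 0x8): 0x1 | 0x8 == 0x9.
 */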
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
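/*
 * Worked example of the priority rule above: if both SDEBUG_UA_POR (0) and
 * SDEBUG_UA_LUNS_CHANGED (5) are pending in a device's uas_bm, the
 * find_first_bit() scan in make_ua() below returns 0, so the power-on UA is
 * reported first; the LUNs-changed UA only surfaces on a later command.
 */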
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
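/*
 * Worked example: with BITS_PER_LONG == 64 (a 64-bit build),
 * SDEBUG_CANQUEUE is 3 * 64 = 192 commands per submit queue; on a
 * 32-bit build it is 3 * 32 = 96.
 */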
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
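/*
 * The two macros above are a round trip through scsi_cmnd::host_scribble,
 * which is just an opaque pointer-sized stash: after
 * ASSIGN_QUEUED_CMD(scmd, sqcp), TO_QUEUED_CMD(scmd) yields sqcp again.
 */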
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	unsigned int z_start;
	unsigned int z_wp;
};
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};
struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
	spinlock_t lock;
};
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
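/*
 * Worked example of len_mask: the INQUIRY entry in opcode_info_arr below is
 * {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}. len_mask[0] == 6 gives the cdb
 * length; 0xe3 says only bits 7..5, 1 and 0 of cdb[1] may be set; 0xc7 in
 * the control byte position tolerates the NACA/FLAG/LINK bits. With the
 * strict module parameter set, a cdb bit outside the mask draws an Illegal
 * Request sense.
 */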
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
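/*
 * Example lookup: a READ(10) cdb starts with opcode 0x28, so
 * opcode_ind_arr[0x28] is SDEB_I_READ (9); opcode_info_arr[9] below is the
 * READ(16) leaf whose read_iarr overflow array holds the 10, 6 and 12 byte
 * READ variants.
 */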
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
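/*
 * Hypothetical sketch of the convention above (not a copy of any one
 * response function in this file): a handler that wants an early, IMMED
 * style completion could end with
 *
 *	return res | SDEG_RES_IMMED_MASK;
 *
 * where res is the usual 4 byte tuple-in-an-int result.
 */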
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
	    {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
	    {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
	    maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
			    0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
	     0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
						/* READ POSITION (10) */
/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
	    {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
	    {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* iouring iopoll interface.*/

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
			break;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
		break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
		break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
		break;

	default:
		goto out_error;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}

static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule(sdebug_tartget_cleanup_async, targetip);
	}
}
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

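	/*
	 * do_div() divides its first argument in place and returns the
	 * remainder, so the next statement reduces lba modulo
	 * sdebug_store_sectors: an out-of-range lba simply wraps around
	 * the fake store.
	 */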
	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
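/*
 * Placement note for the sense-key specific bytes built above: with fixed
 * format sense (sdebug_dsense == 0) the three sks bytes land at offsets
 * 15..17 of the sense buffer; with descriptor format they go into a type
 * 0x2 descriptor appended after the header, which is why the two branches
 * differ.
 */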
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
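/*
 * Worked example of the resid arithmetic above: with scsi_bufflen() == 512,
 * off_dst == 0 and act_len == 96, n is 416 and the resid drops from 512 to
 * 416; a later call at a higher offset only lowers it further if its own n
 * is smaller, which is what makes out-of-order offsets safe.
 */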
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}

static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
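/*
 * Composition example: the NAA-3 identifiers below are formed by simple
 * addition, e.g. naa3_comp_b + dev_id_num yields 0x3333333000000001 for
 * dev_id_num == 1; the top nibble 3 marks a locally assigned NAA.
 */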
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
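/*
 * Padding example for the arithmetic above: na1 is 29 characters, so plen
 * starts at 30 and is rounded up to 32 (the next multiple of four); the
 * memset() clears the NUL terminator plus two pad bytes.
 */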
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1758 static unsigned char vpdb0_data[] = {
1759 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1760 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1761 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1762 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1765 /* Block limits VPD page (SBC-3) */
1766 static int inquiry_vpd_b0(unsigned char *arr)
1770 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1772 /* Optimal transfer length granularity */
1773 if (sdebug_opt_xferlen_exp != 0 &&
1774 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1775 gran = 1 << sdebug_opt_xferlen_exp;
1777 gran = 1 << sdebug_physblk_exp;
1778 put_unaligned_be16(gran, arr + 2);
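/*
 * Example: with sdebug_physblk_exp=3 the reported granularity is 8
 * blocks unless sdebug_opt_xferlen_exp is set to something larger
 * (e.g. 6, which yields gran=64 in bytes 2-3 of this page).
 */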
1780 /* Maximum Transfer Length */
1781 if (sdebug_store_sectors > 0x400)
1782 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1784 /* Optimal Transfer Length */
1785 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1788 /* Maximum Unmap LBA Count */
1789 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1791 /* Maximum Unmap Block Descriptor Count */
1792 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1795 /* Unmap Granularity Alignment */
1796 if (sdebug_unmap_alignment) {
1797 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1798 arr[28] |= 0x80; /* UGAVALID */
1801 /* Optimal Unmap Granularity */
1802 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1804 /* Maximum WRITE SAME Length */
1805 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1807 return 0x3c; /* Mandatory page length when Logical Block Provisioning is supported */
1810 /* Block device characteristics VPD page (SBC-3) */
1811 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1813 memset(arr, 0, 0x3c);
1815 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1817 arr[3] = 5; /* less than 1.8" */
1818 if (devip->zmodel == BLK_ZONED_HA)
1819 arr[4] = 1 << 4; /* zoned field = 01b */
1824 /* Logical block provisioning VPD page (SBC-4) */
1825 static int inquiry_vpd_b2(unsigned char *arr)
1827 memset(arr, 0, 0x4);
1828 arr[0] = 0; /* threshold exponent */
1835 if (sdebug_lbprz && scsi_debug_lbp())
1836 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1837 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1838 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1839 /* threshold_percentage=0 */
1843 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1844 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1846 memset(arr, 0, 0x3c);
1847 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1849 * Set Optimal number of open sequential write preferred zones and
1850 * Optimal number of non-sequentially written sequential write
1851 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1852 * fields set to zero, apart from Max. number of open swrz_s field.
1854 put_unaligned_be32(0xffffffff, &arr[4]);
1855 put_unaligned_be32(0xffffffff, &arr[8]);
1856 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1857 put_unaligned_be32(devip->max_open, &arr[12]);
1859 put_unaligned_be32(0xffffffff, &arr[12]);
1860 if (devip->zcap < devip->zsize) {
1861 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1862 put_unaligned_be64(devip->zsize, &arr[20]);
1869 #define SDEBUG_LONG_INQ_SZ 96
1870 #define SDEBUG_MAX_INQ_ARR_SZ 584
1872 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1874 unsigned char pq_pdt;
1876 unsigned char *cmd = scp->cmnd;
1879 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1881 alloc_len = get_unaligned_be16(cmd + 3);
1882 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1884 return DID_REQUEUE << 16;
1885 is_disk = (sdebug_ptype == TYPE_DISK);
1886 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1887 is_disk_zbc = (is_disk || is_zbc);
1888 have_wlun = scsi_is_wlun(scp->device->lun);
1890 pq_pdt = TYPE_WLUN; /* present, wlun */
1891 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1892 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1894 pq_pdt = (sdebug_ptype & 0x1f);
1896 if (0x2 & cmd[1]) { /* CMDDT bit set */
1897 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1899 return check_condition_result;
1900 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1901 int lu_id_num, port_group_id, target_dev_id;
1904 int host_no = devip->sdbg_host->shost->host_no;
1906 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1907 (devip->channel & 0x7f);
1908 if (sdebug_vpd_use_hostno == 0)
1910 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1911 (devip->target * 1000) + devip->lun);
1912 target_dev_id = ((host_no + 1) * 2000) +
1913 (devip->target * 1000) - 3;
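/*
 * Worked example with illustrative values host_no=0, channel=0,
 * target=0, lun=0: port_group_id = 0x100, lu_id_num = 2000 and
 * target_dev_id = 1997.
 */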
1914 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1915 if (0 == cmd[2]) { /* supported vital product data pages */
1916 arr[1] = cmd[2]; /* sanity */
1918 arr[n++] = 0x0; /* this page */
1919 arr[n++] = 0x80; /* unit serial number */
1920 arr[n++] = 0x83; /* device identification */
1921 arr[n++] = 0x84; /* software interface ident. */
1922 arr[n++] = 0x85; /* management network addresses */
1923 arr[n++] = 0x86; /* extended inquiry */
1924 arr[n++] = 0x87; /* mode page policy */
1925 arr[n++] = 0x88; /* SCSI ports */
1926 if (is_disk_zbc) { /* SBC or ZBC */
1927 arr[n++] = 0x89; /* ATA information */
1928 arr[n++] = 0xb0; /* Block limits */
1929 arr[n++] = 0xb1; /* Block characteristics */
1931 arr[n++] = 0xb2; /* LB Provisioning */
1933 arr[n++] = 0xb6; /* ZB dev. char. */
1935 arr[3] = n - 4; /* number of supported VPD pages */
1936 } else if (0x80 == cmd[2]) { /* unit serial number */
1937 arr[1] = cmd[2]; /* sanity */
1939 memcpy(&arr[4], lu_id_str, len);
1940 } else if (0x83 == cmd[2]) { /* device identification */
1941 arr[1] = cmd[2]; /* sanity */
1942 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1943 target_dev_id, lu_id_num,
1946 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1947 arr[1] = cmd[2]; /* sanity */
1948 arr[3] = inquiry_vpd_84(&arr[4]);
1949 } else if (0x85 == cmd[2]) { /* Management network addresses */
1950 arr[1] = cmd[2]; /* sanity */
1951 arr[3] = inquiry_vpd_85(&arr[4]);
1952 } else if (0x86 == cmd[2]) { /* extended inquiry */
1953 arr[1] = cmd[2]; /* sanity */
1954 arr[3] = 0x3c; /* number of following entries */
1955 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1956 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1957 else if (have_dif_prot)
1958 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1960 arr[4] = 0x0; /* no protection stuff */
1961 arr[5] = 0x7; /* head of q, ordered + simple q's */
1962 } else if (0x87 == cmd[2]) { /* mode page policy */
1963 arr[1] = cmd[2]; /* sanity */
1964 arr[3] = 0x8; /* number of following entries */
1965 arr[4] = 0x2; /* disconnect-reconnect mp */
1966 arr[6] = 0x80; /* mlus, shared */
1967 arr[8] = 0x18; /* protocol specific lu */
1968 arr[10] = 0x82; /* mlus, per initiator port */
1969 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1970 arr[1] = cmd[2]; /* sanity */
1971 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1972 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1973 arr[1] = cmd[2]; /* sanity */
1974 n = inquiry_vpd_89(&arr[4]);
1975 put_unaligned_be16(n, arr + 2);
1976 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1977 arr[1] = cmd[2]; /* sanity */
1978 arr[3] = inquiry_vpd_b0(&arr[4]);
1979 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1980 arr[1] = cmd[2]; /* sanity */
1981 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1982 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1983 arr[1] = cmd[2]; /* sanity */
1984 arr[3] = inquiry_vpd_b2(&arr[4]);
1985 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1987 arr[1] = cmd[2]; /* sanity */
1987 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1989 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1991 return check_condition_result;
1993 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1994 ret = fill_from_dev_buffer(scp, arr,
1995 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1999 /* drops through here for a standard inquiry */
2000 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2001 arr[2] = sdebug_scsi_level;
2002 arr[3] = 2; /* response_data_format==2 */
2003 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2004 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2005 if (sdebug_vpd_use_hostno == 0)
2006 arr[5] |= 0x10; /* claim: implicit TPGS */
2007 arr[6] = 0x10; /* claim: MultiP */
2008 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2009 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2010 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2011 memcpy(&arr[16], sdebug_inq_product_id, 16);
2012 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2013 /* Use Vendor Specific area to place driver date in ASCII */
2014 memcpy(&arr[36], sdebug_version_date, 8);
2015 /* version descriptors (2 bytes each) follow */
2016 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2017 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2019 if (is_disk) { /* SBC-4 no version claimed */
2020 put_unaligned_be16(0x600, arr + n);
2022 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2023 put_unaligned_be16(0x525, arr + n);
2025 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2026 put_unaligned_be16(0x624, arr + n);
2029 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2030 ret = fill_from_dev_buffer(scp, arr,
2031 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2036 /* See resp_iec_m_pg() for how this data is manipulated */
2037 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2040 static int resp_requests(struct scsi_cmnd *scp,
2041 struct sdebug_dev_info *devip)
2043 unsigned char *cmd = scp->cmnd;
2044 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2045 bool dsense = !!(cmd[1] & 1);
2046 u32 alloc_len = cmd[4];
2048 int stopped_state = atomic_read(&devip->stopped);
2050 memset(arr, 0, sizeof(arr));
2051 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2055 arr[2] = LOGICAL_UNIT_NOT_READY;
2056 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2060 arr[2] = NOT_READY; /* NOT_READY in sense_key */
2061 arr[7] = 0xa; /* 18 byte sense buffer */
2062 arr[12] = LOGICAL_UNIT_NOT_READY;
2063 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2065 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2066 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2069 arr[1] = 0x0; /* NO_SENSE in sense_key */
2070 arr[2] = THRESHOLD_EXCEEDED;
2071 arr[3] = 0xff; /* Failure prediction(false) */
2075 arr[2] = 0x0; /* NO_SENSE in sense_key */
2076 arr[7] = 0xa; /* 18 byte sense buffer */
2077 arr[12] = THRESHOLD_EXCEEDED;
2078 arr[13] = 0xff; /* Failure prediction(false) */
2080 } else { /* nothing to report */
2083 memset(arr, 0, len);
2086 memset(arr, 0, len);
2091 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2094 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2096 unsigned char *cmd = scp->cmnd;
2097 int power_cond, want_stop, stopped_state;
2100 power_cond = (cmd[4] & 0xf0) >> 4;
2102 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2103 return check_condition_result;
2105 want_stop = !(cmd[4] & 1);
2106 stopped_state = atomic_read(&devip->stopped);
2107 if (stopped_state == 2) {
2108 ktime_t now_ts = ktime_get_boottime();
2110 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2111 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2113 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2114 /* tur_ms_to_ready timer expired */
2115 atomic_set(&devip->stopped, 0);
2119 if (stopped_state == 2) {
2121 stopped_state = 1; /* dummy up success */
2122 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2123 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2124 return check_condition_result;
2128 changing = (stopped_state != want_stop);
2130 atomic_xchg(&devip->stopped, want_stop);
2131 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2132 return SDEG_RES_IMMED_MASK;
2137 static sector_t get_sdebug_capacity(void)
2139 static const unsigned int gibibyte = 1073741824;
2141 if (sdebug_virtual_gb > 0)
2142 return (sector_t)sdebug_virtual_gb *
2143 (gibibyte / sdebug_sector_size);
2145 return sdebug_store_sectors;
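/*
 * Example: virtual_gb=4 with the default 512 byte sector size reports
 * 4 * (1073741824 / 512) = 8388608 sectors, however much backing
 * store was actually allocated.
 */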
2148 #define SDEBUG_READCAP_ARR_SZ 8
2149 static int resp_readcap(struct scsi_cmnd *scp,
2150 struct sdebug_dev_info *devip)
2152 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2155 /* following just in case virtual_gb changed */
2156 sdebug_capacity = get_sdebug_capacity();
2157 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2158 if (sdebug_capacity < 0xffffffff) {
2159 capac = (unsigned int)sdebug_capacity - 1;
2160 put_unaligned_be32(capac, arr + 0);
2162 put_unaligned_be32(0xffffffff, arr + 0);
2163 put_unaligned_be16(sdebug_sector_size, arr + 6);
2164 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2167 #define SDEBUG_READCAP16_ARR_SZ 32
2168 static int resp_readcap16(struct scsi_cmnd *scp,
2169 struct sdebug_dev_info *devip)
2171 unsigned char *cmd = scp->cmnd;
2172 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2175 alloc_len = get_unaligned_be32(cmd + 10);
2176 /* following just in case virtual_gb changed */
2177 sdebug_capacity = get_sdebug_capacity();
2178 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2179 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2180 put_unaligned_be32(sdebug_sector_size, arr + 8);
2181 arr[13] = sdebug_physblk_exp & 0xf;
2182 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2184 if (scsi_debug_lbp()) {
2185 arr[14] |= 0x80; /* LBPME */
2186 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2187 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2188 * in the wider field maps to 0 in this field.
2190 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2195 * Since the scsi_debug READ CAPACITY implementation always reports the
2196 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2198 if (devip->zmodel == BLK_ZONED_HM)
2201 arr[15] = sdebug_lowest_aligned & 0xff;
2203 if (have_dif_prot) {
2204 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2205 arr[12] |= 1; /* PROT_EN */
2208 return fill_from_dev_buffer(scp, arr,
2209 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2212 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2214 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2215 struct sdebug_dev_info *devip)
2217 unsigned char *cmd = scp->cmnd;
2219 int host_no = devip->sdbg_host->shost->host_no;
2220 int port_group_a, port_group_b, port_a, port_b;
2224 alen = get_unaligned_be32(cmd + 6);
2225 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2227 return DID_REQUEUE << 16;
2229 * EVPD page 0x88 states we have two ports, one
2230 * real and a fake port with no device connected.
2231 * So we create two port groups with one port each
2232 * and set the group with port B to unavailable.
2234 port_a = 0x1; /* relative port A */
2235 port_b = 0x2; /* relative port B */
2236 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2237 (devip->channel & 0x7f);
2238 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2239 (devip->channel & 0x7f) + 0x80;
2242 * The asymmetric access state is cycled according to the host_id.
2245 if (sdebug_vpd_use_hostno == 0) {
2246 arr[n++] = host_no % 3; /* Asymm access state */
2247 arr[n++] = 0x0F; /* claim: all states are supported */
2249 arr[n++] = 0x0; /* Active/Optimized path */
2250 arr[n++] = 0x01; /* only support active/optimized paths */
2252 put_unaligned_be16(port_group_a, arr + n);
2254 arr[n++] = 0; /* Reserved */
2255 arr[n++] = 0; /* Status code */
2256 arr[n++] = 0; /* Vendor unique */
2257 arr[n++] = 0x1; /* One port per group */
2258 arr[n++] = 0; /* Reserved */
2259 arr[n++] = 0; /* Reserved */
2260 put_unaligned_be16(port_a, arr + n);
2262 arr[n++] = 3; /* Port unavailable */
2263 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2264 put_unaligned_be16(port_group_b, arr + n);
2266 arr[n++] = 0; /* Reserved */
2267 arr[n++] = 0; /* Status code */
2268 arr[n++] = 0; /* Vendor unique */
2269 arr[n++] = 0x1; /* One port per group */
2270 arr[n++] = 0; /* Reserved */
2271 arr[n++] = 0; /* Reserved */
2272 put_unaligned_be16(port_b, arr + n);
2276 put_unaligned_be32(rlen, arr + 0);
2279 * Return the smallest value of either
2280 * - The allocated length
2281 * - The constructed response length
2282 * - The maximum array size
2284 rlen = min(alen, n);
2285 ret = fill_from_dev_buffer(scp, arr,
2286 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2291 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2292 struct sdebug_dev_info *devip)
2295 u8 reporting_opts, req_opcode, sdeb_i, supp;
2297 u32 alloc_len, a_len;
2298 int k, offset, len, errsts, count, bump, na;
2299 const struct opcode_info_t *oip;
2300 const struct opcode_info_t *r_oip;
2302 u8 *cmd = scp->cmnd;
2304 rctd = !!(cmd[2] & 0x80);
2305 reporting_opts = cmd[2] & 0x7;
2306 req_opcode = cmd[3];
2307 req_sa = get_unaligned_be16(cmd + 4);
2308 alloc_len = get_unaligned_be32(cmd + 6);
2309 if (alloc_len < 4 || alloc_len > 0xffff) {
2310 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2311 return check_condition_result;
2313 if (alloc_len > 8192)
2317 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2319 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2321 return check_condition_result;
2323 switch (reporting_opts) {
2324 case 0: /* all commands */
2325 /* count number of commands */
2326 for (count = 0, oip = opcode_info_arr;
2327 oip->num_attached != 0xff; ++oip) {
2328 if (F_INV_OP & oip->flags)
2330 count += (oip->num_attached + 1);
2332 bump = rctd ? 20 : 8;
2333 put_unaligned_be32(count * bump, arr);
2334 for (offset = 4, oip = opcode_info_arr;
2335 oip->num_attached != 0xff && offset < a_len; ++oip) {
2336 if (F_INV_OP & oip->flags)
2338 na = oip->num_attached;
2339 arr[offset] = oip->opcode;
2340 put_unaligned_be16(oip->sa, arr + offset + 2);
2342 arr[offset + 5] |= 0x2;
2343 if (FF_SA & oip->flags)
2344 arr[offset + 5] |= 0x1;
2345 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2347 put_unaligned_be16(0xa, arr + offset + 8);
2349 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2350 if (F_INV_OP & oip->flags)
2353 arr[offset] = oip->opcode;
2354 put_unaligned_be16(oip->sa, arr + offset + 2);
2356 arr[offset + 5] |= 0x2;
2357 if (FF_SA & oip->flags)
2358 arr[offset + 5] |= 0x1;
2359 put_unaligned_be16(oip->len_mask[0],
2362 put_unaligned_be16(0xa,
2369 case 1: /* one command: opcode only */
2370 case 2: /* one command: opcode plus service action */
2371 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2372 sdeb_i = opcode_ind_arr[req_opcode];
2373 oip = &opcode_info_arr[sdeb_i];
2374 if (F_INV_OP & oip->flags) {
2378 if (1 == reporting_opts) {
2379 if (FF_SA & oip->flags) {
2380 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2383 return check_condition_result;
2386 } else if (2 == reporting_opts &&
2387 0 == (FF_SA & oip->flags)) {
2388 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2389 kfree(arr);
2390 return check_condition_result;
2392 if (0 == (FF_SA & oip->flags) &&
2393 req_opcode == oip->opcode)
2395 else if (0 == (FF_SA & oip->flags)) {
2396 na = oip->num_attached;
2397 for (k = 0, oip = oip->arrp; k < na;
2399 if (req_opcode == oip->opcode)
2402 supp = (k >= na) ? 1 : 3;
2403 } else if (req_sa != oip->sa) {
2404 na = oip->num_attached;
2405 for (k = 0, oip = oip->arrp; k < na;
2407 if (req_sa == oip->sa)
2410 supp = (k >= na) ? 1 : 3;
2414 u = oip->len_mask[0];
2415 put_unaligned_be16(u, arr + 2);
2416 arr[4] = oip->opcode;
2417 for (k = 1; k < u; ++k)
2418 arr[4 + k] = (k < 16) ?
2419 oip->len_mask[k] : 0xff;
2424 arr[1] = (rctd ? 0x80 : 0) | supp;
2426 put_unaligned_be16(0xa, arr + offset);
2431 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2433 return check_condition_result;
2435 offset = (offset < a_len) ? offset : a_len;
2436 len = (offset < alloc_len) ? offset : alloc_len;
2437 errsts = fill_from_dev_buffer(scp, arr, len);
2442 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2443 struct sdebug_dev_info *devip)
2448 u8 *cmd = scp->cmnd;
2450 memset(arr, 0, sizeof(arr));
2451 repd = !!(cmd[2] & 0x80);
2452 alloc_len = get_unaligned_be32(cmd + 6);
2453 if (alloc_len < 4) {
2454 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2455 return check_condition_result;
2457 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2458 arr[1] = 0x1; /* ITNRS */
2465 len = (len < alloc_len) ? len : alloc_len;
2466 return fill_from_dev_buffer(scp, arr, len);
2469 /* <<Following mode page info copied from ST318451LW>> */
2471 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2472 { /* Read-Write Error Recovery page for mode_sense */
2473 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2476 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2478 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2479 return sizeof(err_recov_pg);
2482 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2483 { /* Disconnect-Reconnect page for mode_sense */
2484 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2485 0, 0, 0, 0, 0, 0, 0, 0};
2487 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2489 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2490 return sizeof(disconnect_pg);
2493 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2494 { /* Format device page for mode_sense */
2495 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2496 0, 0, 0, 0, 0, 0, 0, 0,
2497 0, 0, 0, 0, 0x40, 0, 0, 0};
2499 memcpy(p, format_pg, sizeof(format_pg));
2500 put_unaligned_be16(sdebug_sectors_per, p + 10);
2501 put_unaligned_be16(sdebug_sector_size, p + 12);
2502 if (sdebug_removable)
2503 p[20] |= 0x20; /* should agree with INQUIRY */
2505 memset(p + 2, 0, sizeof(format_pg) - 2);
2506 return sizeof(format_pg);
2509 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2510 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2513 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2514 { /* Caching page for mode_sense */
2515 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2516 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2517 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2518 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2520 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2521 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2522 memcpy(p, caching_pg, sizeof(caching_pg));
2524 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2525 else if (2 == pcontrol)
2526 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2527 return sizeof(caching_pg);
2530 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2533 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2534 { /* Control mode page for mode_sense */
2535 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2537 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2541 ctrl_m_pg[2] |= 0x4;
2543 ctrl_m_pg[2] &= ~0x4;
2546 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2548 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2550 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2551 else if (2 == pcontrol)
2552 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2553 return sizeof(ctrl_m_pg);
2557 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2558 { /* Informational Exceptions control mode page for mode_sense */
2559 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2561 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2564 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2566 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2567 else if (2 == pcontrol)
2568 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2569 return sizeof(iec_m_pg);
2572 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2573 { /* SAS SSP mode page - short format for mode_sense */
2574 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2575 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2577 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2579 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2580 return sizeof(sas_sf_m_pg);
2584 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2586 { /* SAS phy control and discover mode page for mode_sense */
2587 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2588 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2589 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2590 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2591 0x2, 0, 0, 0, 0, 0, 0, 0,
2592 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2593 0, 0, 0, 0, 0, 0, 0, 0,
2594 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2595 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2596 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2597 0x3, 0, 0, 0, 0, 0, 0, 0,
2598 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2599 0, 0, 0, 0, 0, 0, 0, 0,
2603 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2604 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2605 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2606 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2607 port_a = target_dev_id + 1;
2608 port_b = port_a + 1;
2609 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2610 put_unaligned_be32(port_a, p + 20);
2611 put_unaligned_be32(port_b, p + 48 + 20);
2613 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2614 return sizeof(sas_pcd_m_pg);
2617 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2618 { /* SAS SSP shared protocol specific port mode subpage */
2619 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2620 0, 0, 0, 0, 0, 0, 0, 0,
2623 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2625 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2626 return sizeof(sas_sha_m_pg);
2629 #define SDEBUG_MAX_MSENSE_SZ 256
2631 static int resp_mode_sense(struct scsi_cmnd *scp,
2632 struct sdebug_dev_info *devip)
2634 int pcontrol, pcode, subpcode, bd_len;
2635 unsigned char dev_spec;
2636 u32 alloc_len, offset, len;
2638 int target = scp->device->id;
2640 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2641 unsigned char *cmd = scp->cmnd;
2642 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2644 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2645 pcontrol = (cmd[2] & 0xc0) >> 6;
2646 pcode = cmd[2] & 0x3f;
2648 msense_6 = (MODE_SENSE == cmd[0]);
2649 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2650 is_disk = (sdebug_ptype == TYPE_DISK);
2651 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2652 if ((is_disk || is_zbc) && !dbd)
2653 bd_len = llbaa ? 16 : 8;
2656 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2657 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2658 if (0x3 == pcontrol) { /* Saving values not supported */
2659 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2660 return check_condition_result;
2662 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2663 (devip->target * 1000) - 3;
2664 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2665 if (is_disk || is_zbc) {
2666 dev_spec = 0x10; /* would be 0x90 if WP=1 (read-only) */
2678 arr[4] = 0x1; /* set LONGLBA bit */
2679 arr[7] = bd_len; /* assume 255 or less */
2683 if ((bd_len > 0) && (!sdebug_capacity))
2684 sdebug_capacity = get_sdebug_capacity();
2687 if (sdebug_capacity > 0xfffffffe)
2688 put_unaligned_be32(0xffffffff, ap + 0);
2690 put_unaligned_be32(sdebug_capacity, ap + 0);
2691 put_unaligned_be16(sdebug_sector_size, ap + 6);
2694 } else if (16 == bd_len) {
2695 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2696 put_unaligned_be32(sdebug_sector_size, ap + 12);
2701 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2702 /* TODO: Control Extension page */
2703 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2704 return check_condition_result;
2709 case 0x1: /* Read-Write error recovery page, direct access */
2710 len = resp_err_recov_pg(ap, pcontrol, target);
2713 case 0x2: /* Disconnect-Reconnect page, all devices */
2714 len = resp_disconnect_pg(ap, pcontrol, target);
2717 case 0x3: /* Format device page, direct access */
2719 len = resp_format_pg(ap, pcontrol, target);
2724 case 0x8: /* Caching page, direct access */
2725 if (is_disk || is_zbc) {
2726 len = resp_caching_pg(ap, pcontrol, target);
2731 case 0xa: /* Control Mode page, all devices */
2732 len = resp_ctrl_m_pg(ap, pcontrol, target);
2735 case 0x19: /* if spc==1 then sas phy, control+discover */
2736 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2737 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2738 return check_condition_result;
2741 if ((0x0 == subpcode) || (0xff == subpcode))
2742 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2743 if ((0x1 == subpcode) || (0xff == subpcode))
2744 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2746 if ((0x2 == subpcode) || (0xff == subpcode))
2747 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2750 case 0x1c: /* Informational Exceptions Mode page, all devices */
2751 len = resp_iec_m_pg(ap, pcontrol, target);
2754 case 0x3f: /* Read all Mode pages */
2755 if ((0 == subpcode) || (0xff == subpcode)) {
2756 len = resp_err_recov_pg(ap, pcontrol, target);
2757 len += resp_disconnect_pg(ap + len, pcontrol, target);
2759 len += resp_format_pg(ap + len, pcontrol,
2761 len += resp_caching_pg(ap + len, pcontrol,
2763 } else if (is_zbc) {
2764 len += resp_caching_pg(ap + len, pcontrol,
2767 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2768 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2769 if (0xff == subpcode) {
2770 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2771 target, target_dev_id);
2772 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2774 len += resp_iec_m_pg(ap + len, pcontrol, target);
2777 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2778 return check_condition_result;
2786 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2787 return check_condition_result;
2790 arr[0] = offset - 1;
2792 put_unaligned_be16((offset - 2), arr + 0);
2793 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
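/*
 * Header length example: a 36 byte response (offset=36) yields
 * arr[0]=35 for MODE SENSE(6) and a two byte field of 34 for
 * MODE SENSE(10); the mode data length field excludes itself.
 */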
2796 #define SDEBUG_MAX_MSELECT_SZ 512
2798 static int resp_mode_select(struct scsi_cmnd *scp,
2799 struct sdebug_dev_info *devip)
2801 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2802 int param_len, res, mpage;
2803 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2804 unsigned char *cmd = scp->cmnd;
2805 int mselect6 = (MODE_SELECT == cmd[0]);
2807 memset(arr, 0, sizeof(arr));
2810 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2811 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2812 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2813 return check_condition_result;
2815 res = fetch_to_dev_buffer(scp, arr, param_len);
2817 return DID_ERROR << 16;
2818 else if (sdebug_verbose && (res < param_len))
2819 sdev_printk(KERN_INFO, scp->device,
2820 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2821 __func__, param_len, res);
2822 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2823 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2824 off = bd_len + (mselect6 ? 4 : 8);
2825 if (md_len > 2 || off >= res) {
2826 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2827 return check_condition_result;
2829 mpage = arr[off] & 0x3f;
2830 ps = !!(arr[off] & 0x80);
2832 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2833 return check_condition_result;
2835 spf = !!(arr[off] & 0x40);
2836 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2838 if ((pg_len + off) > param_len) {
2839 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2840 PARAMETER_LIST_LENGTH_ERR, 0);
2841 return check_condition_result;
2844 case 0x8: /* Caching Mode page */
2845 if (caching_pg[1] == arr[off + 1]) {
2846 memcpy(caching_pg + 2, arr + off + 2,
2847 sizeof(caching_pg) - 2);
2848 goto set_mode_changed_ua;
2851 case 0xa: /* Control Mode page */
2852 if (ctrl_m_pg[1] == arr[off + 1]) {
2853 memcpy(ctrl_m_pg + 2, arr + off + 2,
2854 sizeof(ctrl_m_pg) - 2);
2855 if (ctrl_m_pg[4] & 0x8)
2859 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2860 goto set_mode_changed_ua;
2863 case 0x1c: /* Informational Exceptions Mode page */
2864 if (iec_m_pg[1] == arr[off + 1]) {
2865 memcpy(iec_m_pg + 2, arr + off + 2,
2866 sizeof(iec_m_pg) - 2);
2867 goto set_mode_changed_ua;
2873 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2874 return check_condition_result;
2875 set_mode_changed_ua:
2876 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2880 static int resp_temp_l_pg(unsigned char *arr)
2882 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2883 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2886 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2887 return sizeof(temp_l_pg);
2890 static int resp_ie_l_pg(unsigned char *arr)
2892 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2895 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2896 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2897 arr[4] = THRESHOLD_EXCEEDED;
2900 return sizeof(ie_l_pg);
2903 static int resp_env_rep_l_spg(unsigned char *arr)
2905 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2906 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2907 0x1, 0x0, 0x23, 0x8,
2908 0x0, 55, 72, 35, 55, 45, 0, 0,
2911 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2912 return sizeof(env_rep_l_spg);
2915 #define SDEBUG_MAX_LSENSE_SZ 512
2917 static int resp_log_sense(struct scsi_cmnd *scp,
2918 struct sdebug_dev_info *devip)
2920 int ppc, sp, pcode, subpcode;
2921 u32 alloc_len, len, n;
2922 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2923 unsigned char *cmd = scp->cmnd;
2925 memset(arr, 0, sizeof(arr));
2929 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2930 return check_condition_result;
2932 pcode = cmd[2] & 0x3f;
2933 subpcode = cmd[3] & 0xff;
2934 alloc_len = get_unaligned_be16(cmd + 7);
2936 if (0 == subpcode) {
2938 case 0x0: /* Supported log pages log page */
2940 arr[n++] = 0x0; /* this page */
2941 arr[n++] = 0xd; /* Temperature */
2942 arr[n++] = 0x2f; /* Informational exceptions */
2945 case 0xd: /* Temperature log page */
2946 arr[3] = resp_temp_l_pg(arr + 4);
2948 case 0x2f: /* Informational exceptions log page */
2949 arr[3] = resp_ie_l_pg(arr + 4);
2952 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2953 return check_condition_result;
2955 } else if (0xff == subpcode) {
2959 case 0x0: /* Supported log pages and subpages log page */
2962 arr[n++] = 0x0; /* 0,0 page */
2964 arr[n++] = 0xff; /* this page */
2966 arr[n++] = 0x0; /* Temperature */
2968 arr[n++] = 0x1; /* Environment reporting */
2970 arr[n++] = 0xff; /* all 0xd subpages */
2972 arr[n++] = 0x0; /* Informational exceptions */
2974 arr[n++] = 0xff; /* all 0x2f subpages */
2977 case 0xd: /* Temperature subpages */
2980 arr[n++] = 0x0; /* Temperature */
2982 arr[n++] = 0x1; /* Environment reporting */
2984 arr[n++] = 0xff; /* these subpages */
2987 case 0x2f: /* Informational exceptions subpages */
2990 arr[n++] = 0x0; /* Informational exceptions */
2992 arr[n++] = 0xff; /* these subpages */
2996 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2997 return check_condition_result;
2999 } else if (subpcode > 0) {
3002 if (pcode == 0xd && subpcode == 1)
3003 arr[3] = resp_env_rep_l_spg(arr + 4);
3005 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3006 return check_condition_result;
3009 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3010 return check_condition_result;
3012 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3013 return fill_from_dev_buffer(scp, arr,
3014 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3017 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3019 return devip->nr_zones != 0;
3022 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3023 unsigned long long lba)
3025 u32 zno = lba >> devip->zsize_shift;
3026 struct sdeb_zone_state *zsp;
3028 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3029 return &devip->zstate[zno];
3032 * If the zone capacity is less than the zone size, adjust for gap
3035 zno = 2 * zno - devip->nr_conv_zones;
3036 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3037 zsp = &devip->zstate[zno];
3038 if (lba >= zsp->z_start + zsp->z_size)
3040 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
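/*
 * Index mapping example: with nr_conv_zones=1 and zcap < zsize the
 * zstate[] array interleaves gap zones (conv, seq, gap, seq, gap,
 * ...), so logical zone number zno=3 lands at array index
 * 2 * 3 - 1 = 5, the third sequential zone.
 */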
3044 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3046 return zsp->z_type == ZBC_ZTYPE_CNV;
3049 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3051 return zsp->z_type == ZBC_ZTYPE_GAP;
3054 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3056 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3059 static void zbc_close_zone(struct sdebug_dev_info *devip,
3060 struct sdeb_zone_state *zsp)
3062 enum sdebug_z_cond zc;
3064 if (!zbc_zone_is_seq(zsp))
3068 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3071 if (zc == ZC2_IMPLICIT_OPEN)
3072 devip->nr_imp_open--;
3074 devip->nr_exp_open--;
3076 if (zsp->z_wp == zsp->z_start) {
3077 zsp->z_cond = ZC1_EMPTY;
3079 zsp->z_cond = ZC4_CLOSED;
3084 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3086 struct sdeb_zone_state *zsp = &devip->zstate[0];
3089 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3090 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3091 zbc_close_zone(devip, zsp);
3097 static void zbc_open_zone(struct sdebug_dev_info *devip,
3098 struct sdeb_zone_state *zsp, bool explicit)
3100 enum sdebug_z_cond zc;
3102 if (!zbc_zone_is_seq(zsp))
3106 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3107 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3110 /* Close an implicit open zone if necessary */
3111 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3112 zbc_close_zone(devip, zsp);
3113 else if (devip->max_open &&
3114 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3115 zbc_close_imp_open_zone(devip);
3117 if (zsp->z_cond == ZC4_CLOSED)
3120 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3121 devip->nr_exp_open++;
3123 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3124 devip->nr_imp_open++;
3128 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3129 struct sdeb_zone_state *zsp)
3131 switch (zsp->z_cond) {
3132 case ZC2_IMPLICIT_OPEN:
3133 devip->nr_imp_open--;
3135 case ZC3_EXPLICIT_OPEN:
3136 devip->nr_exp_open--;
3139 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3140 zsp->z_start, zsp->z_cond);
3143 zsp->z_cond = ZC5_FULL;
3146 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3147 unsigned long long lba, unsigned int num)
3149 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3150 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3152 if (!zbc_zone_is_seq(zsp))
3155 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3157 if (zsp->z_wp >= zend)
3158 zbc_set_zone_full(devip, zsp);
3163 if (lba != zsp->z_wp)
3164 zsp->z_non_seq_resource = true;
3170 } else if (end > zsp->z_wp) {
3176 if (zsp->z_wp >= zend)
3177 zbc_set_zone_full(devip, zsp);
3183 zend = zsp->z_start + zsp->z_size;
3188 static int check_zbc_access_params(struct scsi_cmnd *scp,
3189 unsigned long long lba, unsigned int num, bool write)
3191 struct scsi_device *sdp = scp->device;
3192 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3193 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3194 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3197 if (devip->zmodel == BLK_ZONED_HA)
3199 /* For host-managed, reads cannot cross zone type boundaries */
3200 if (zsp->z_type != zsp_end->z_type) {
3201 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3204 return check_condition_result;
3209 /* Writing into a gap zone is not allowed */
3210 if (zbc_zone_is_gap(zsp)) {
3211 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3212 ATTEMPT_ACCESS_GAP);
3213 return check_condition_result;
3216 /* No restrictions for writes within conventional zones */
3217 if (zbc_zone_is_conv(zsp)) {
3218 if (!zbc_zone_is_conv(zsp_end)) {
3219 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3221 WRITE_BOUNDARY_ASCQ);
3222 return check_condition_result;
3227 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3228 /* Writes cannot cross sequential zone boundaries */
3229 if (zsp_end != zsp) {
3230 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3232 WRITE_BOUNDARY_ASCQ);
3233 return check_condition_result;
3235 /* Cannot write full zones */
3236 if (zsp->z_cond == ZC5_FULL) {
3237 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3238 INVALID_FIELD_IN_CDB, 0);
3239 return check_condition_result;
3241 /* Writes must be aligned to the zone WP */
3242 if (lba != zsp->z_wp) {
3243 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3245 UNALIGNED_WRITE_ASCQ);
3246 return check_condition_result;
3250 /* Handle implicit open of closed and empty zones */
3251 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3252 if (devip->max_open &&
3253 devip->nr_exp_open >= devip->max_open) {
3254 mk_sense_buffer(scp, DATA_PROTECT,
3257 return check_condition_result;
3259 zbc_open_zone(devip, zsp, false);
3265 static inline int check_device_access_params
3266 (struct scsi_cmnd *scp, unsigned long long lba,
3267 unsigned int num, bool write)
3269 struct scsi_device *sdp = scp->device;
3270 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3272 if (lba + num > sdebug_capacity) {
3273 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3274 return check_condition_result;
3276 /* transfer length excessive (tie in to block limits VPD page) */
3277 if (num > sdebug_store_sectors) {
3278 /* needs work to find which cdb byte 'num' comes from */
3279 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3280 return check_condition_result;
3282 if (write && unlikely(sdebug_wp)) {
3283 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3284 return check_condition_result;
3286 if (sdebug_dev_is_zoned(devip))
3287 return check_zbc_access_params(scp, lba, num, write);
3293 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3294 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3295 * that access any of the "stores" in struct sdeb_store_info should call this
3296 * function with bug_if_fake_rw set to true.
3298 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3299 bool bug_if_fake_rw)
3301 if (sdebug_fake_rw) {
3302 BUG_ON(bug_if_fake_rw); /* See note above */
3305 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
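/*
 * Typical use (see resp_read_dt0() below): handlers that really touch
 * a store call devip2sip(devip, true), relying on the F_FAKE_RW flag
 * in the parser tables to have screened out fake_rw commands first.
 */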
3308 /* Returns number of bytes copied or -1 if error. */
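/*
 * Wrap-around example with hypothetical sizes: for a 1000 sector
 * store, block=990 and num=20 give rest=10, so ten sectors are copied
 * at the tail of the store and the remaining ten from its start.
 */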
3309 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3310 u32 sg_skip, u64 lba, u32 num, bool do_write)
3313 u64 block, rest = 0;
3314 enum dma_data_direction dir;
3315 struct scsi_data_buffer *sdb = &scp->sdb;
3319 dir = DMA_TO_DEVICE;
3320 write_since_sync = true;
3322 dir = DMA_FROM_DEVICE;
3325 if (!sdb->length || !sip)
3327 if (scp->sc_data_direction != dir)
3331 block = do_div(lba, sdebug_store_sectors);
3332 if (block + num > sdebug_store_sectors)
3333 rest = block + num - sdebug_store_sectors;
3335 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3336 fsp + (block * sdebug_sector_size),
3337 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3338 if (ret != (num - rest) * sdebug_sector_size)
3342 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3343 fsp, rest * sdebug_sector_size,
3344 sg_skip + ((num - rest) * sdebug_sector_size),
3351 /* Returns number of bytes copied or -1 if error. */
3352 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3354 struct scsi_data_buffer *sdb = &scp->sdb;
3358 if (scp->sc_data_direction != DMA_TO_DEVICE)
3360 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3361 num * sdebug_sector_size, 0, true);
3364 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3365 * arr into sip->storep+lba and return true. If comparison fails then
3366 * return false. */
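/* The caller's arr thus holds 2 * num blocks, mirroring the COMPARE
 * AND WRITE data-out layout: the first num blocks are the verify
 * (compare) data, the following num blocks the data to write on a
 * match.
 */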
3367 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3368 const u8 *arr, bool compare_only)
3371 u64 block, rest = 0;
3372 u32 store_blks = sdebug_store_sectors;
3373 u32 lb_size = sdebug_sector_size;
3374 u8 *fsp = sip->storep;
3376 block = do_div(lba, store_blks);
3377 if (block + num > store_blks)
3378 rest = block + num - store_blks;
3380 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3384 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3390 arr += num * lb_size;
3391 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3393 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3397 static __be16 dif_compute_csum(const void *buf, int len)
3402 csum = (__force __be16)ip_compute_csum(buf, len);
3404 csum = cpu_to_be16(crc_t10dif(buf, len));
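/*
 * Guard tag note: crc_t10dif() computes the standard T10 CRC16 guard,
 * while ip_compute_csum() provides the IP checksum alternative that
 * the driver's "guard" module parameter (guard=1) selects.
 */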
3409 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3410 sector_t sector, u32 ei_lba)
3412 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3414 if (sdt->guard_tag != csum) {
3415 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3416 (unsigned long)sector,
3417 be16_to_cpu(sdt->guard_tag),
3421 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3422 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3423 pr_err("REF check failed on sector %lu\n",
3424 (unsigned long)sector);
3427 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3428 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3429 pr_err("REF check failed on sector %lu\n",
3430 (unsigned long)sector);
3436 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3437 unsigned int sectors, bool read)
3441 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3442 scp->device->hostdata, true);
3443 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3444 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3445 struct sg_mapping_iter miter;
3447 /* Bytes of protection data to copy into sgl */
3448 resid = sectors * sizeof(*dif_storep);
3450 sg_miter_start(&miter, scsi_prot_sglist(scp),
3451 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3452 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3454 while (sg_miter_next(&miter) && resid > 0) {
3455 size_t len = min_t(size_t, miter.length, resid);
3456 void *start = dif_store(sip, sector);
3459 if (dif_store_end < start + len)
3460 rest = start + len - dif_store_end;
3465 memcpy(paddr, start, len - rest);
3467 memcpy(start, paddr, len - rest);
3471 memcpy(paddr + len - rest, dif_storep, rest);
3473 memcpy(dif_storep, paddr + len - rest, rest);
3476 sector += len / sizeof(*dif_storep);
3479 sg_miter_stop(&miter);
3482 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3483 unsigned int sectors, u32 ei_lba)
3488 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3489 scp->device->hostdata, true);
3490 struct t10_pi_tuple *sdt;
3492 for (i = 0; i < sectors; i++, ei_lba++) {
3493 sector = start_sec + i;
3494 sdt = dif_store(sip, sector);
3496 if (sdt->app_tag == cpu_to_be16(0xffff))
3500 * Because scsi_debug acts as both initiator and
3501 * target we proceed to verify the PI even if
3502 * RDPROTECT=3. This is done so the "initiator" knows
3503 * which type of error to return. Otherwise we would
3504 * have to iterate over the PI twice.
3506 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3507 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3516 dif_copy_prot(scp, start_sec, sectors, true);
3523 sdeb_read_lock(struct sdeb_store_info *sip)
3525 if (sdebug_no_rwlock) {
3527 __acquire(&sip->macc_lck);
3529 __acquire(&sdeb_fake_rw_lck);
3532 read_lock(&sip->macc_lck);
3534 read_lock(&sdeb_fake_rw_lck);
3539 sdeb_read_unlock(struct sdeb_store_info *sip)
3541 if (sdebug_no_rwlock) {
3543 __release(&sip->macc_lck);
3545 __release(&sdeb_fake_rw_lck);
3548 read_unlock(&sip->macc_lck);
3550 read_unlock(&sdeb_fake_rw_lck);
3555 sdeb_write_lock(struct sdeb_store_info *sip)
3557 if (sdebug_no_rwlock) {
3559 __acquire(&sip->macc_lck);
3561 __acquire(&sdeb_fake_rw_lck);
3564 write_lock(&sip->macc_lck);
3566 write_lock(&sdeb_fake_rw_lck);
3571 sdeb_write_unlock(struct sdeb_store_info *sip)
3573 if (sdebug_no_rwlock) {
3575 __release(&sip->macc_lck);
3577 __release(&sdeb_fake_rw_lck);
3580 write_unlock(&sip->macc_lck);
3582 write_unlock(&sdeb_fake_rw_lck);
3586 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3593 struct sdeb_store_info *sip = devip2sip(devip, true);
3594 u8 *cmd = scp->cmnd;
3599 lba = get_unaligned_be64(cmd + 2);
3600 num = get_unaligned_be32(cmd + 10);
3605 lba = get_unaligned_be32(cmd + 2);
3606 num = get_unaligned_be16(cmd + 7);
3611 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3612 (u32)(cmd[1] & 0x1f) << 16;
3613 num = (0 == cmd[4]) ? 256 : cmd[4];
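/* 6-byte CDB decode above: 21-bit LBA in bytes 1-3, and a transfer
 * length of 0 means 256 blocks. E.g. cmd[1..4] = 0x01,0x23,0x45,0x00
 * gives lba=0x12345, num=256.
 */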
3618 lba = get_unaligned_be32(cmd + 2);
3619 num = get_unaligned_be32(cmd + 6);
3622 case XDWRITEREAD_10:
3624 lba = get_unaligned_be32(cmd + 2);
3625 num = get_unaligned_be16(cmd + 7);
3628 default: /* assume READ(32) */
3629 lba = get_unaligned_be64(cmd + 12);
3630 ei_lba = get_unaligned_be32(cmd + 20);
3631 num = get_unaligned_be32(cmd + 28);
3635 if (unlikely(have_dif_prot && check_prot)) {
3636 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3638 mk_sense_invalid_opcode(scp);
3639 return check_condition_result;
3641 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3642 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3643 (cmd[1] & 0xe0) == 0)
3644 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3647 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3648 atomic_read(&sdeb_inject_pending))) {
3650 atomic_set(&sdeb_inject_pending, 0);
3653 ret = check_device_access_params(scp, lba, num, false);
3656 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3657 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3658 ((lba + num) > sdebug_medium_error_start))) {
3659 /* claim unrecoverable read error */
3660 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3661 /* set info field and valid bit for fixed descriptor */
3662 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3663 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3664 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3665 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3666 put_unaligned_be32(ret, scp->sense_buffer + 3);
3668 scsi_set_resid(scp, scsi_bufflen(scp));
3669 return check_condition_result;
3672 sdeb_read_lock(sip);
3675 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3676 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3677 case 1: /* Guard tag error */
3678 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3679 sdeb_read_unlock(sip);
3680 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3681 return check_condition_result;
3682 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3683 sdeb_read_unlock(sip);
3684 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3685 return illegal_condition_result;
3688 case 3: /* Reference tag error */
3689 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3690 sdeb_read_unlock(sip);
3691 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3692 return check_condition_result;
3693 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3694 sdeb_read_unlock(sip);
3695 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3696 return illegal_condition_result;
3702 ret = do_device_access(sip, scp, 0, lba, num, false);
3703 sdeb_read_unlock(sip);
3704 if (unlikely(ret == -1))
3705 return DID_ERROR << 16;
3707 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3709 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3710 atomic_read(&sdeb_inject_pending))) {
3711 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3712 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3713 atomic_set(&sdeb_inject_pending, 0);
3714 return check_condition_result;
3715 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3716 /* Logical block guard check failed */
3717 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3718 atomic_set(&sdeb_inject_pending, 0);
3719 return illegal_condition_result;
3720 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3721 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3722 atomic_set(&sdeb_inject_pending, 0);
3723 return illegal_condition_result;
3729 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3730 unsigned int sectors, u32 ei_lba)
3733 struct t10_pi_tuple *sdt;
3735 sector_t sector = start_sec;
3738 struct sg_mapping_iter diter;
3739 struct sg_mapping_iter piter;
3741 BUG_ON(scsi_sg_count(SCpnt) == 0);
3742 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3744 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3745 scsi_prot_sg_count(SCpnt),
3746 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3747 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3748 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3750 /* For each protection page */
3751 while (sg_miter_next(&piter)) {
3753 if (WARN_ON(!sg_miter_next(&diter))) {
3758 for (ppage_offset = 0; ppage_offset < piter.length;
3759 ppage_offset += sizeof(struct t10_pi_tuple)) {
3760 /* If we're at the end of the current
3761 * data page, advance to the next one
3763 if (dpage_offset >= diter.length) {
3764 if (WARN_ON(!sg_miter_next(&diter))) {
3771 sdt = piter.addr + ppage_offset;
3772 daddr = diter.addr + dpage_offset;
3774 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3775 ret = dif_verify(sdt, daddr, sector, ei_lba);
3782 dpage_offset += sdebug_sector_size;
3784 diter.consumed = dpage_offset;
3785 sg_miter_stop(&diter);
3787 sg_miter_stop(&piter);
3789 dif_copy_prot(SCpnt, start_sec, sectors, false);
3796 sg_miter_stop(&diter);
3797 sg_miter_stop(&piter);
3801 static unsigned long lba_to_map_index(sector_t lba)
3803 if (sdebug_unmap_alignment)
3804 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3805 sector_div(lba, sdebug_unmap_granularity);
3809 static sector_t map_index_to_lba(unsigned long index)
3811 sector_t lba = index * sdebug_unmap_granularity;
3813 if (sdebug_unmap_alignment)
3814 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
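/*
 * Worked example: with sdebug_unmap_granularity=8 and
 * sdebug_unmap_alignment=4, lba_to_map_index() first shifts by
 * 8 - 4 = 4 so LBAs 0-3 map to index 0 and LBAs 4-11 to index 1;
 * map_index_to_lba(1) returns 8 - 4 = 4, the first LBA of that chunk.
 */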
3818 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3822 unsigned int mapped;
3823 unsigned long index;
3826 index = lba_to_map_index(lba);
3827 mapped = test_bit(index, sip->map_storep);
3830 next = find_next_zero_bit(sip->map_storep, map_size, index);
3832 next = find_next_bit(sip->map_storep, map_size, index);
3834 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3839 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3842 sector_t end = lba + len;
3845 unsigned long index = lba_to_map_index(lba);
3847 if (index < map_size)
3848 set_bit(index, sip->map_storep);
3850 lba = map_index_to_lba(index + 1);
3854 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3857 sector_t end = lba + len;
3858 u8 *fsp = sip->storep;
3861 unsigned long index = lba_to_map_index(lba);
3863 if (lba == map_index_to_lba(index) &&
3864 lba + sdebug_unmap_granularity <= end &&
3866 clear_bit(index, sip->map_storep);
3867 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff bytes */
3868 memset(fsp + lba * sdebug_sector_size,
3869 (sdebug_lbprz & 1) ? 0 : 0xff,
3870 sdebug_sector_size *
3871 sdebug_unmap_granularity);
3873 if (sip->dif_storep) {
3874 memset(sip->dif_storep + lba, 0xff,
3875 sizeof(*sip->dif_storep) *
3876 sdebug_unmap_granularity);
3879 lba = map_index_to_lba(index + 1);
3883 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3890 struct sdeb_store_info *sip = devip2sip(devip, true);
3891 u8 *cmd = scp->cmnd;
3896 lba = get_unaligned_be64(cmd + 2);
3897 num = get_unaligned_be32(cmd + 10);
3902 lba = get_unaligned_be32(cmd + 2);
3903 num = get_unaligned_be16(cmd + 7);
3908 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3909 (u32)(cmd[1] & 0x1f) << 16;
3910 num = (0 == cmd[4]) ? 256 : cmd[4];
3915 lba = get_unaligned_be32(cmd + 2);
3916 num = get_unaligned_be32(cmd + 6);
3919 case 0x53: /* XDWRITEREAD(10) */
3921 lba = get_unaligned_be32(cmd + 2);
3922 num = get_unaligned_be16(cmd + 7);
3925 default: /* assume WRITE(32) */
3926 lba = get_unaligned_be64(cmd + 12);
3927 ei_lba = get_unaligned_be32(cmd + 20);
3928 num = get_unaligned_be32(cmd + 28);
3932 if (unlikely(have_dif_prot && check_prot)) {
3933 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3935 mk_sense_invalid_opcode(scp);
3936 return check_condition_result;
3938 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3939 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3940 (cmd[1] & 0xe0) == 0)
3941 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3945 sdeb_write_lock(sip);
3946 ret = check_device_access_params(scp, lba, num, true);
3948 sdeb_write_unlock(sip);
3953 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3954 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3955 case 1: /* Guard tag error */
3956 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3957 sdeb_write_unlock(sip);
3958 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3959 return illegal_condition_result;
3960 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3961 sdeb_write_unlock(sip);
3962 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3963 return check_condition_result;
3966 case 3: /* Reference tag error */
3967 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3968 sdeb_write_unlock(sip);
3969 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3970 return illegal_condition_result;
3971 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3972 sdeb_write_unlock(sip);
3973 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3974 return check_condition_result;
3980 ret = do_device_access(sip, scp, 0, lba, num, true);
3981 if (unlikely(scsi_debug_lbp()))
3982 map_region(sip, lba, num);
3983 /* If ZBC zone then bump its write pointer */
3984 if (sdebug_dev_is_zoned(devip))
3985 zbc_inc_wp(devip, lba, num);
3986 sdeb_write_unlock(sip);
3987 if (unlikely(-1 == ret))
3988 return DID_ERROR << 16;
3989 else if (unlikely(sdebug_verbose &&
3990 (ret < (num * sdebug_sector_size))))
3991 sdev_printk(KERN_INFO, scp->device,
3992 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3993 my_name, num * sdebug_sector_size, ret);
3995 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3996 atomic_read(&sdeb_inject_pending))) {
3997 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3998 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3999 atomic_set(&sdeb_inject_pending, 0);
4000 return check_condition_result;
4001 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4002 /* Logical block guard check failed */
4003 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4004 atomic_set(&sdeb_inject_pending, 0);
4005 return illegal_condition_result;
4006 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4007 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4008 atomic_set(&sdeb_inject_pending, 0);
4009 return illegal_condition_result;
4016 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4017 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
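/*
 * Sketch of the parameter data parsed below (offsets as this code
 * reads them): the data-out buffer starts with a 32-byte header,
 * followed by num_lrd LBA range descriptors of lrd_size (32) bytes
 * each. Each descriptor holds the starting LBA in bytes 0-7 (big
 * endian) and the number of logical blocks in bytes 8-11; the
 * 32-byte CDB variant also carries an expected initial reference
 * tag in bytes 12-15. The scattered data itself begins lbdof
 * logical blocks into the buffer (sg_off = lbdof * lb_size).
 */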
4019 static int resp_write_scat(struct scsi_cmnd *scp,
4020 struct sdebug_dev_info *devip)
4022 u8 *cmd = scp->cmnd;
4025 struct sdeb_store_info *sip = devip2sip(devip, true);
4027 u16 lbdof, num_lrd, k;
4028 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4029 u32 lb_size = sdebug_sector_size;
4034 static const u32 lrd_size = 32; /* + parameter list header size */
4036 if (cmd[0] == VARIABLE_LENGTH_CMD) {
4038 wrprotect = (cmd[10] >> 5) & 0x7;
4039 lbdof = get_unaligned_be16(cmd + 12);
4040 num_lrd = get_unaligned_be16(cmd + 16);
4041 bt_len = get_unaligned_be32(cmd + 28);
4042 } else { /* that leaves WRITE SCATTERED(16) */
4044 wrprotect = (cmd[2] >> 5) & 0x7;
4045 lbdof = get_unaligned_be16(cmd + 4);
4046 num_lrd = get_unaligned_be16(cmd + 8);
4047 bt_len = get_unaligned_be32(cmd + 10);
4048 if (unlikely(have_dif_prot)) {
4049 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4051 mk_sense_invalid_opcode(scp);
4052 return illegal_condition_result;
4054 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4055 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4057 sdev_printk(KERN_ERR, scp->device,
4058 "Unprotected WR to DIF device\n");
4061 if ((num_lrd == 0) || (bt_len == 0))
4062 return 0; /* T10 says these do-nothings are not errors */
4065 sdev_printk(KERN_INFO, scp->device,
4066 "%s: %s: LB Data Offset field bad\n",
4068 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4069 return illegal_condition_result;
4071 lbdof_blen = lbdof * lb_size;
4072 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4074 sdev_printk(KERN_INFO, scp->device,
4075 "%s: %s: LBA range descriptors don't fit\n",
4077 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4078 return illegal_condition_result;
4080 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4082 return SCSI_MLQUEUE_HOST_BUSY;
4084 sdev_printk(KERN_INFO, scp->device,
4085 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4086 my_name, __func__, lbdof_blen);
4087 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4089 ret = DID_ERROR << 16;
4093 sdeb_write_lock(sip);
4094 sg_off = lbdof_blen;
4095 /* Spec says the Buffer Transfer Length field counts logical blocks in dout */
4097 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4098 lba = get_unaligned_be64(up + 0);
4099 num = get_unaligned_be32(up + 8);
4101 sdev_printk(KERN_INFO, scp->device,
4102 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
4103 my_name, __func__, k, lba, num, sg_off);
4106 ret = check_device_access_params(scp, lba, num, true);
4108 goto err_out_unlock;
4109 num_by = num * lb_size;
4110 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4112 if ((cum_lb + num) > bt_len) {
4114 sdev_printk(KERN_INFO, scp->device,
4115 "%s: %s: sum of blocks > data provided\n",
4117 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4119 ret = illegal_condition_result;
4120 goto err_out_unlock;
4124 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4125 int prot_ret = prot_verify_write(scp, lba, num,
4129 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4131 ret = illegal_condition_result;
4132 goto err_out_unlock;
4136 ret = do_device_access(sip, scp, sg_off, lba, num, true);
4137 /* If ZBC zone then bump its write pointer */
4138 if (sdebug_dev_is_zoned(devip))
4139 zbc_inc_wp(devip, lba, num);
4140 if (unlikely(scsi_debug_lbp()))
4141 map_region(sip, lba, num);
4142 if (unlikely(-1 == ret)) {
4143 ret = DID_ERROR << 16;
4144 goto err_out_unlock;
4145 } else if (unlikely(sdebug_verbose && (ret < num_by)))
4146 sdev_printk(KERN_INFO, scp->device,
4147 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4148 my_name, num_by, ret);
4150 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4151 atomic_read(&sdeb_inject_pending))) {
4152 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4153 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4154 atomic_set(&sdeb_inject_pending, 0);
4155 ret = check_condition_result;
4156 goto err_out_unlock;
4157 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4158 /* Logical block guard check failed */
4159 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4160 atomic_set(&sdeb_inject_pending, 0);
4161 ret = illegal_condition_result;
4162 goto err_out_unlock;
4163 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4164 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4165 atomic_set(&sdeb_inject_pending, 0);
4166 ret = illegal_condition_result;
4167 goto err_out_unlock;
4175 sdeb_write_unlock(sip);
4181 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4182 u32 ei_lba, bool unmap, bool ndob)
4184 struct scsi_device *sdp = scp->device;
4185 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4186 unsigned long long i;
4188 u32 lb_size = sdebug_sector_size;
4190 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4191 scp->device->hostdata, true);
4195 sdeb_write_lock(sip);
4197 ret = check_device_access_params(scp, lba, num, true);
4199 sdeb_write_unlock(sip);
4203 if (unmap && scsi_debug_lbp()) {
4204 unmap_region(sip, lba, num);
4208 block = do_div(lbaa, sdebug_store_sectors);
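/* do_div() divides lbaa in place (quotient left in lbaa) and returns the
 * remainder, so block is lbaa modulo the store size; this wraps accesses
 * when the virtual capacity exceeds the backing store. */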
4209 /* if ndob then zero 1 logical block, else fetch 1 logical block */
4211 fs1p = fsp + (block * lb_size);
4213 memset(fs1p, 0, lb_size);
4216 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4219 sdeb_write_unlock(sip);
4220 return DID_ERROR << 16;
4221 } else if (sdebug_verbose && !ndob && (ret < lb_size))
4222 sdev_printk(KERN_INFO, scp->device,
4223 "%s: %s: lb size=%u, IO sent=%d bytes\n",
4224 my_name, "write same", lb_size, ret);
4226 /* Copy first sector to remaining blocks */
4227 for (i = 1 ; i < num ; i++) {
4229 block = do_div(lbaa, sdebug_store_sectors);
4230 memmove(fsp + (block * lb_size), fs1p, lb_size);
4232 if (scsi_debug_lbp())
4233 map_region(sip, lba, num);
4234 /* If ZBC zone then bump its write pointer */
4235 if (sdebug_dev_is_zoned(devip))
4236 zbc_inc_wp(devip, lba, num);
4238 sdeb_write_unlock(sip);
4243 static int resp_write_same_10(struct scsi_cmnd *scp,
4244 struct sdebug_dev_info *devip)
4246 u8 *cmd = scp->cmnd;
4253 if (sdebug_lbpws10 == 0) {
4254 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4255 return check_condition_result;
4259 lba = get_unaligned_be32(cmd + 2);
4260 num = get_unaligned_be16(cmd + 7);
4261 if (num > sdebug_write_same_length) {
4262 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4263 return check_condition_result;
4265 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4268 static int resp_write_same_16(struct scsi_cmnd *scp,
4269 struct sdebug_dev_info *devip)
4271 u8 *cmd = scp->cmnd;
4278 if (cmd[1] & 0x8) { /* UNMAP */
4279 if (sdebug_lbpws == 0) {
4280 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4281 return check_condition_result;
4285 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
4287 lba = get_unaligned_be64(cmd + 2);
4288 num = get_unaligned_be32(cmd + 10);
4289 if (num > sdebug_write_same_length) {
4290 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4291 return check_condition_result;
4293 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4296 /* Note the mode field is in the same position as the (lower) service action
4297 * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4298 * each mode of this command should be reported separately; left for the future. */
4299 static int resp_write_buffer(struct scsi_cmnd *scp,
4300 struct sdebug_dev_info *devip)
4302 u8 *cmd = scp->cmnd;
4303 struct scsi_device *sdp = scp->device;
4304 struct sdebug_dev_info *dp;
4307 mode = cmd[1] & 0x1f;
4309 case 0x4: /* download microcode (MC) and activate (ACT) */
4310 /* set UAs on this device only */
4311 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4312 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4314 case 0x5: /* download MC, save and ACT */
4315 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4317 case 0x6: /* download MC with offsets and ACT */
4318 /* set UAs on most devices (LUs) in this target */
4319 list_for_each_entry(dp,
4320 &devip->sdbg_host->dev_info_list,
4322 if (dp->target == sdp->id) {
4323 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4325 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4329 case 0x7: /* download MC with offsets, save, and ACT */
4330 /* set UA on all devices (LUs) in this target */
4331 list_for_each_entry(dp,
4332 &devip->sdbg_host->dev_info_list,
4334 if (dp->target == sdp->id)
4335 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4339 /* do nothing for this command for other mode values */
4345 static int resp_comp_write(struct scsi_cmnd *scp,
4346 struct sdebug_dev_info *devip)
4348 u8 *cmd = scp->cmnd;
4350 struct sdeb_store_info *sip = devip2sip(devip, true);
4353 u32 lb_size = sdebug_sector_size;
4358 lba = get_unaligned_be64(cmd + 2);
4359 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4361 return 0; /* degenerate case, not an error */
4362 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4364 mk_sense_invalid_opcode(scp);
4365 return check_condition_result;
4367 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4368 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4369 (cmd[1] & 0xe0) == 0)
4370 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4372 ret = check_device_access_params(scp, lba, num, false);
4376 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4378 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4380 return check_condition_result;
4383 sdeb_write_lock(sip);
4385 ret = do_dout_fetch(scp, dnum, arr);
4387 retval = DID_ERROR << 16;
4389 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4390 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4391 "indicated=%u, IO sent=%d bytes\n", my_name,
4392 dnum * lb_size, ret);
4393 if (!comp_write_worker(sip, lba, num, arr, false)) {
4394 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4395 retval = check_condition_result;
4398 if (scsi_debug_lbp())
4399 map_region(sip, lba, num);
4401 sdeb_write_unlock(sip);
4406 struct unmap_block_desc {
4412 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4415 struct unmap_block_desc *desc;
4416 struct sdeb_store_info *sip = devip2sip(devip, true);
4417 unsigned int i, payload_len, descriptors;
4420 if (!scsi_debug_lbp())
4421 return 0; /* fib and say it's done */
4422 payload_len = get_unaligned_be16(scp->cmnd + 7);
4423 BUG_ON(scsi_bufflen(scp) != payload_len);
4425 descriptors = (payload_len - 8) / 16;
4426 if (descriptors > sdebug_unmap_max_desc) {
4427 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4428 return check_condition_result;
4431 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4433 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4435 return check_condition_result;
4438 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4440 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4441 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4443 desc = (void *)&buf[8];
4445 sdeb_write_lock(sip);
4447 for (i = 0 ; i < descriptors ; i++) {
4448 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4449 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4451 ret = check_device_access_params(scp, lba, num, true);
4455 unmap_region(sip, lba, num);
4461 sdeb_write_unlock(sip);
4467 #define SDEBUG_GET_LBA_STATUS_LEN 32
4469 static int resp_get_lba_status(struct scsi_cmnd *scp,
4470 struct sdebug_dev_info *devip)
4472 u8 *cmd = scp->cmnd;
4474 u32 alloc_len, mapped, num;
4476 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4478 lba = get_unaligned_be64(cmd + 2);
4479 alloc_len = get_unaligned_be32(cmd + 10);
4484 ret = check_device_access_params(scp, lba, 1, false);
4488 if (scsi_debug_lbp()) {
4489 struct sdeb_store_info *sip = devip2sip(devip, true);
4491 mapped = map_state(sip, lba, &num);
4494 /* following just in case virtual_gb changed */
4495 sdebug_capacity = get_sdebug_capacity();
4496 if (sdebug_capacity - lba <= 0xffffffff)
4497 num = sdebug_capacity - lba;
4502 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4503 put_unaligned_be32(20, arr); /* Parameter Data Length */
4504 put_unaligned_be64(lba, arr + 8); /* LBA */
4505 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4506 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4508 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4511 static int resp_sync_cache(struct scsi_cmnd *scp,
4512 struct sdebug_dev_info *devip)
4517 u8 *cmd = scp->cmnd;
4519 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4520 lba = get_unaligned_be32(cmd + 2);
4521 num_blocks = get_unaligned_be16(cmd + 7);
4522 } else { /* SYNCHRONIZE_CACHE(16) */
4523 lba = get_unaligned_be64(cmd + 2);
4524 num_blocks = get_unaligned_be32(cmd + 10);
4526 if (lba + num_blocks > sdebug_capacity) {
4527 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4528 return check_condition_result;
4530 if (!write_since_sync || (cmd[1] & 0x2))
4531 res = SDEG_RES_IMMED_MASK;
4532 else /* delay if write_since_sync and IMMED clear */
4533 write_since_sync = false;
4538 * Assuming LBA+num_blocks is not out-of-range, this function returns
4539 * CONDITION MET if the specified blocks will fit (or already sit) in the
4540 * cache, and GOOD status otherwise. Since we model a disk with a big cache,
4541 * always yield CONDITION MET. It actually tries to bring the range of the
4542 * store in main memory into the cache associated with the CPU(s).
4544 static int resp_pre_fetch(struct scsi_cmnd *scp,
4545 struct sdebug_dev_info *devip)
4549 u64 block, rest = 0;
4551 u8 *cmd = scp->cmnd;
4552 struct sdeb_store_info *sip = devip2sip(devip, true);
4553 u8 *fsp = sip->storep;
4555 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4556 lba = get_unaligned_be32(cmd + 2);
4557 nblks = get_unaligned_be16(cmd + 7);
4558 } else { /* PRE-FETCH(16) */
4559 lba = get_unaligned_be64(cmd + 2);
4560 nblks = get_unaligned_be32(cmd + 10);
4562 if (lba + nblks > sdebug_capacity) {
4563 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4564 return check_condition_result;
4568 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4569 block = do_div(lba, sdebug_store_sectors);
4570 if (block + nblks > sdebug_store_sectors)
4571 rest = block + nblks - sdebug_store_sectors;
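/* Example (values hypothetical): block=9, nblks=4 with a 10-sector
 * store gives rest=3, so one sector is prefetched from the end of
 * the store and sectors 0-2 from its start (the wrap case below). */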
4573 /* Try to bring the PRE-FETCH range into CPU's cache */
4574 sdeb_read_lock(sip);
4575 prefetch_range(fsp + (sdebug_sector_size * block),
4576 (nblks - rest) * sdebug_sector_size);
4578 prefetch_range(fsp, rest * sdebug_sector_size);
4579 sdeb_read_unlock(sip);
4582 res = SDEG_RES_IMMED_MASK;
4583 return res | condition_met_result;
4586 #define RL_BUCKET_ELEMS 8
4588 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4589 * (W-LUN), the normal Linux scanning logic does not associate it with a
4590 * device (e.g. /dev/sg7). The following magic will make that association:
4591 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4592 * where <n> is a host number. If there are multiple targets in a host then
4593 * the above will associate a W-LUN to each target. To only get a W-LUN
4594 * for target 2, then use "echo '- 2 49409' > scan" .
4596 static int resp_report_luns(struct scsi_cmnd *scp,
4597 struct sdebug_dev_info *devip)
4599 unsigned char *cmd = scp->cmnd;
4600 unsigned int alloc_len;
4601 unsigned char select_report;
4603 struct scsi_lun *lun_p;
4604 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4605 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4606 unsigned int wlun_cnt; /* report luns W-LUN count */
4607 unsigned int tlun_cnt; /* total LUN count */
4608 unsigned int rlen; /* response length (in bytes) */
4610 unsigned int off_rsp = 0;
4611 const int sz_lun = sizeof(struct scsi_lun);
4613 clear_luns_changed_on_target(devip);
4615 select_report = cmd[2];
4616 alloc_len = get_unaligned_be32(cmd + 6);
4618 if (alloc_len < 4) {
4619 pr_err("alloc len too small %d\n", alloc_len);
4620 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4621 return check_condition_result;
4624 switch (select_report) {
4625 case 0: /* all LUNs apart from W-LUNs */
4626 lun_cnt = sdebug_max_luns;
4629 case 1: /* only W-LUNs */
4633 case 2: /* all LUNs */
4634 lun_cnt = sdebug_max_luns;
4637 case 0x10: /* only administrative LUs */
4638 case 0x11: /* see SPC-5 */
4639 case 0x12: /* only subsidiary LUs owned by referenced LU */
4641 pr_debug("select report invalid %d\n", select_report);
4642 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4643 return check_condition_result;
4646 if (sdebug_no_lun_0 && (lun_cnt > 0))
4649 tlun_cnt = lun_cnt + wlun_cnt;
4650 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4651 scsi_set_resid(scp, scsi_bufflen(scp));
4652 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4653 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4655 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4656 lun = sdebug_no_lun_0 ? 1 : 0;
4657 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4658 memset(arr, 0, sizeof(arr));
4659 lun_p = (struct scsi_lun *)&arr[0];
4661 put_unaligned_be32(rlen, &arr[0]);
4665 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4666 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4668 int_to_scsilun(lun++, lun_p);
4669 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4670 lun_p->scsi_lun[0] |= 0x40;
4672 if (j < RL_BUCKET_ELEMS)
4675 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4681 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4685 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4689 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4691 bool is_bytchk3 = false;
4694 u32 vnum, a_num, off;
4695 const u32 lb_size = sdebug_sector_size;
4698 u8 *cmd = scp->cmnd;
4699 struct sdeb_store_info *sip = devip2sip(devip, true);
4701 bytchk = (cmd[1] >> 1) & 0x3;
4703 return 0; /* always claim internal verify okay */
4704 } else if (bytchk == 2) {
4705 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4706 return check_condition_result;
4707 } else if (bytchk == 3) {
4708 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4712 lba = get_unaligned_be64(cmd + 2);
4713 vnum = get_unaligned_be32(cmd + 10);
4715 case VERIFY: /* is VERIFY(10) */
4716 lba = get_unaligned_be32(cmd + 2);
4717 vnum = get_unaligned_be16(cmd + 7);
4720 mk_sense_invalid_opcode(scp);
4721 return check_condition_result;
4724 return 0; /* not an error */
4725 a_num = is_bytchk3 ? 1 : vnum;
4726 /* Treat following check like one for read (i.e. no write) access */
4727 ret = check_device_access_params(scp, lba, a_num, false);
4731 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4733 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4735 return check_condition_result;
4737 /* Not changing store, so only need read access */
4738 sdeb_read_lock(sip);
4740 ret = do_dout_fetch(scp, a_num, arr);
4742 ret = DID_ERROR << 16;
4744 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4745 sdev_printk(KERN_INFO, scp->device,
4746 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4747 my_name, __func__, a_num * lb_size, ret);
4750 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4751 memcpy(arr + off, arr, lb_size);
4754 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4755 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4756 ret = check_condition_result;
4760 sdeb_read_unlock(sip);
4765 #define RZONES_DESC_HD 64
4767 /* Report zones depending on start LBA and reporting options */
4768 static int resp_report_zones(struct scsi_cmnd *scp,
4769 struct sdebug_dev_info *devip)
4771 unsigned int rep_max_zones, nrz = 0;
4773 u32 alloc_len, rep_opts, rep_len;
4776 u8 *arr = NULL, *desc;
4777 u8 *cmd = scp->cmnd;
4778 struct sdeb_zone_state *zsp = NULL;
4779 struct sdeb_store_info *sip = devip2sip(devip, false);
4781 if (!sdebug_dev_is_zoned(devip)) {
4782 mk_sense_invalid_opcode(scp);
4783 return check_condition_result;
4785 zs_lba = get_unaligned_be64(cmd + 2);
4786 alloc_len = get_unaligned_be32(cmd + 10);
4788 return 0; /* not an error */
4789 rep_opts = cmd[14] & 0x3f;
4790 partial = cmd[14] & 0x80;
4792 if (zs_lba >= sdebug_capacity) {
4793 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4794 return check_condition_result;
4797 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
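/* The response carries a 64-byte header followed by 64-byte zone
 * descriptors (RZONES_DESC_HD), so e.g. alloc_len=4096 leaves room
 * for (4096 - 64) / 64 = 63 descriptors. */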
4799 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4801 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4803 return check_condition_result;
4806 sdeb_read_lock(sip);
4809 for (lba = zs_lba; lba < sdebug_capacity;
4810 lba = zsp->z_start + zsp->z_size) {
4811 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4813 zsp = zbc_zone(devip, lba);
4820 if (zsp->z_cond != ZC1_EMPTY)
4824 /* Implicit open zones */
4825 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4829 /* Explicit open zones */
4830 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4835 if (zsp->z_cond != ZC4_CLOSED)
4840 if (zsp->z_cond != ZC5_FULL)
4847 * Read-only, offline, reset WP recommended are
4848 * not emulated: no zones to report;
4852 /* non-seq-resource set */
4853 if (!zsp->z_non_seq_resource)
4857 /* All zones except gap zones. */
4858 if (zbc_zone_is_gap(zsp))
4862 /* Not write pointer (conventional) zones */
4863 if (zbc_zone_is_seq(zsp))
4867 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4868 INVALID_FIELD_IN_CDB, 0);
4869 ret = check_condition_result;
4873 if (nrz < rep_max_zones) {
4874 /* Fill zone descriptor */
4875 desc[0] = zsp->z_type;
4876 desc[1] = zsp->z_cond << 4;
4877 if (zsp->z_non_seq_resource)
4879 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4880 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4881 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4885 if (partial && nrz >= rep_max_zones)
4892 /* Zone list length. */
4893 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4895 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4896 /* Zone starting LBA granularity. */
4897 if (devip->zcap < devip->zsize)
4898 put_unaligned_be64(devip->zsize, arr + 16);
4900 rep_len = (unsigned long)desc - (unsigned long)arr;
4901 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4904 sdeb_read_unlock(sip);
4909 /* Logic transplanted from tcmu-runner, file_zbc.c */
4910 static void zbc_open_all(struct sdebug_dev_info *devip)
4912 struct sdeb_zone_state *zsp = &devip->zstate[0];
4915 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4916 if (zsp->z_cond == ZC4_CLOSED)
4917 zbc_open_zone(devip, &devip->zstate[i], true);
4921 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4925 enum sdebug_z_cond zc;
4926 u8 *cmd = scp->cmnd;
4927 struct sdeb_zone_state *zsp;
4928 bool all = cmd[14] & 0x01;
4929 struct sdeb_store_info *sip = devip2sip(devip, false);
4931 if (!sdebug_dev_is_zoned(devip)) {
4932 mk_sense_invalid_opcode(scp);
4933 return check_condition_result;
4936 sdeb_write_lock(sip);
4939 /* Check if all closed zones can be opened */
4940 if (devip->max_open &&
4941 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4942 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4944 res = check_condition_result;
4947 /* Open all closed zones */
4948 zbc_open_all(devip);
4952 /* Open the specified zone */
4953 z_id = get_unaligned_be64(cmd + 2);
4954 if (z_id >= sdebug_capacity) {
4955 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4956 res = check_condition_result;
4960 zsp = zbc_zone(devip, z_id);
4961 if (z_id != zsp->z_start) {
4962 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4963 res = check_condition_result;
4966 if (zbc_zone_is_conv(zsp)) {
4967 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4968 res = check_condition_result;
4973 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4976 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4977 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4979 res = check_condition_result;
4983 zbc_open_zone(devip, zsp, true);
4985 sdeb_write_unlock(sip);
4989 static void zbc_close_all(struct sdebug_dev_info *devip)
4993 for (i = 0; i < devip->nr_zones; i++)
4994 zbc_close_zone(devip, &devip->zstate[i]);
4997 static int resp_close_zone(struct scsi_cmnd *scp,
4998 struct sdebug_dev_info *devip)
5002 u8 *cmd = scp->cmnd;
5003 struct sdeb_zone_state *zsp;
5004 bool all = cmd[14] & 0x01;
5005 struct sdeb_store_info *sip = devip2sip(devip, false);
5007 if (!sdebug_dev_is_zoned(devip)) {
5008 mk_sense_invalid_opcode(scp);
5009 return check_condition_result;
5012 sdeb_write_lock(sip);
5015 zbc_close_all(devip);
5019 /* Close specified zone */
5020 z_id = get_unaligned_be64(cmd + 2);
5021 if (z_id >= sdebug_capacity) {
5022 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5023 res = check_condition_result;
5027 zsp = zbc_zone(devip, z_id);
5028 if (z_id != zsp->z_start) {
5029 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5030 res = check_condition_result;
5033 if (zbc_zone_is_conv(zsp)) {
5034 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5035 res = check_condition_result;
5039 zbc_close_zone(devip, zsp);
5041 sdeb_write_unlock(sip);
5045 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5046 struct sdeb_zone_state *zsp, bool empty)
5048 enum sdebug_z_cond zc = zsp->z_cond;
5050 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5051 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5052 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5053 zbc_close_zone(devip, zsp);
5054 if (zsp->z_cond == ZC4_CLOSED)
5056 zsp->z_wp = zsp->z_start + zsp->z_size;
5057 zsp->z_cond = ZC5_FULL;
5061 static void zbc_finish_all(struct sdebug_dev_info *devip)
5065 for (i = 0; i < devip->nr_zones; i++)
5066 zbc_finish_zone(devip, &devip->zstate[i], false);
5069 static int resp_finish_zone(struct scsi_cmnd *scp,
5070 struct sdebug_dev_info *devip)
5072 struct sdeb_zone_state *zsp;
5075 u8 *cmd = scp->cmnd;
5076 bool all = cmd[14] & 0x01;
5077 struct sdeb_store_info *sip = devip2sip(devip, false);
5079 if (!sdebug_dev_is_zoned(devip)) {
5080 mk_sense_invalid_opcode(scp);
5081 return check_condition_result;
5084 sdeb_write_lock(sip);
5087 zbc_finish_all(devip);
5091 /* Finish the specified zone */
5092 z_id = get_unaligned_be64(cmd + 2);
5093 if (z_id >= sdebug_capacity) {
5094 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5095 res = check_condition_result;
5099 zsp = zbc_zone(devip, z_id);
5100 if (z_id != zsp->z_start) {
5101 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5102 res = check_condition_result;
5105 if (zbc_zone_is_conv(zsp)) {
5106 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5107 res = check_condition_result;
5111 zbc_finish_zone(devip, zsp, true);
5113 sdeb_write_unlock(sip);
5117 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5118 struct sdeb_zone_state *zsp)
5120 enum sdebug_z_cond zc;
5121 struct sdeb_store_info *sip = devip2sip(devip, false);
5123 if (!zbc_zone_is_seq(zsp))
5127 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5128 zbc_close_zone(devip, zsp);
5130 if (zsp->z_cond == ZC4_CLOSED)
5133 if (zsp->z_wp > zsp->z_start)
5134 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5135 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5137 zsp->z_non_seq_resource = false;
5138 zsp->z_wp = zsp->z_start;
5139 zsp->z_cond = ZC1_EMPTY;
5142 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5146 for (i = 0; i < devip->nr_zones; i++)
5147 zbc_rwp_zone(devip, &devip->zstate[i]);
5150 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5152 struct sdeb_zone_state *zsp;
5155 u8 *cmd = scp->cmnd;
5156 bool all = cmd[14] & 0x01;
5157 struct sdeb_store_info *sip = devip2sip(devip, false);
5159 if (!sdebug_dev_is_zoned(devip)) {
5160 mk_sense_invalid_opcode(scp);
5161 return check_condition_result;
5164 sdeb_write_lock(sip);
5171 z_id = get_unaligned_be64(cmd + 2);
5172 if (z_id >= sdebug_capacity) {
5173 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5174 res = check_condition_result;
5178 zsp = zbc_zone(devip, z_id);
5179 if (z_id != zsp->z_start) {
5180 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5181 res = check_condition_result;
5184 if (zbc_zone_is_conv(zsp)) {
5185 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5186 res = check_condition_result;
5190 zbc_rwp_zone(devip, zsp);
5192 sdeb_write_unlock(sip);
5196 static u32 get_tag(struct scsi_cmnd *cmnd)
5198 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5201 /* Queued (deferred) command completions converge here. */
5202 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5204 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5205 unsigned long flags;
5206 struct scsi_cmnd *scp = sqcp->scmd;
5207 struct sdebug_scsi_cmd *sdsc;
5210 if (sdebug_statistics) {
5211 atomic_inc(&sdebug_completions);
5212 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5213 atomic_inc(&sdebug_miss_cpus);
5217 pr_err("scmd=NULL\n");
5221 sdsc = scsi_cmd_priv(scp);
5222 spin_lock_irqsave(&sdsc->lock, flags);
5223 aborted = sd_dp->aborted;
5224 if (unlikely(aborted))
5225 sd_dp->aborted = false;
5226 ASSIGN_QUEUED_CMD(scp, NULL);
5228 spin_unlock_irqrestore(&sdsc->lock, flags);
5231 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5232 blk_abort_request(scsi_cmd_to_rq(scp));
5236 scsi_done(scp); /* callback to mid level */
5238 sdebug_free_queued_cmd(sqcp);
5241 /* When the high-resolution timer fires, this function is called. */
5242 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5244 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5246 sdebug_q_cmd_complete(sd_dp);
5247 return HRTIMER_NORESTART;
5250 /* When the work queue runs the deferred work, this function is called. */
5251 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5253 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5255 sdebug_q_cmd_complete(sd_dp);
5258 static bool got_shared_uuid;
5259 static uuid_t shared_uuid;
5261 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5263 struct sdeb_zone_state *zsp;
5264 sector_t capacity = get_sdebug_capacity();
5265 sector_t conv_capacity;
5266 sector_t zstart = 0;
5270 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5271 * a zone size allowing for at least 4 zones on the device. Otherwise,
5272 * use the specified zone size checking that at least 2 zones can be
5273 * created for the device.
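/*
 * Worked example (assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512-byte
 * sectors, both hypothetical here): the starting zsize is
 * 128 MiB / 512 B = 262144 sectors; for a 256 MiB (524288-sector)
 * capacity that allows only 2 zones, so zsize is halved once to
 * 131072 sectors, giving 4 zones of 64 MiB each.
 */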
5275 if (!sdeb_zbc_zone_size_mb) {
5276 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5277 >> ilog2(sdebug_sector_size);
5278 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5280 if (devip->zsize < 2) {
5281 pr_err("Device capacity too small\n");
5285 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5286 pr_err("Zone size is not a power of 2\n");
5289 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5290 >> ilog2(sdebug_sector_size);
5291 if (devip->zsize >= capacity) {
5292 pr_err("Zone size too large for device capacity\n");
5297 devip->zsize_shift = ilog2(devip->zsize);
5298 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5300 if (sdeb_zbc_zone_cap_mb == 0) {
5301 devip->zcap = devip->zsize;
5303 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5304 ilog2(sdebug_sector_size);
5305 if (devip->zcap > devip->zsize) {
5306 pr_err("Zone capacity too large\n");
5311 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5312 if (conv_capacity >= capacity) {
5313 pr_err("Number of conventional zones too large\n");
5316 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5317 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5319 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5321 /* Add gap zones if zone capacity is smaller than the zone size */
5322 if (devip->zcap < devip->zsize)
5323 devip->nr_zones += devip->nr_seq_zones;
5325 if (devip->zmodel == BLK_ZONED_HM) {
5326 /* zbc_max_open_zones can be 0, meaning "not reported" */
5327 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5328 devip->max_open = (devip->nr_zones - 1) / 2;
5330 devip->max_open = sdeb_zbc_max_open;
5333 devip->zstate = kcalloc(devip->nr_zones,
5334 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5338 for (i = 0; i < devip->nr_zones; i++) {
5339 zsp = &devip->zstate[i];
5341 zsp->z_start = zstart;
5343 if (i < devip->nr_conv_zones) {
5344 zsp->z_type = ZBC_ZTYPE_CNV;
5345 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5346 zsp->z_wp = (sector_t)-1;
5348 min_t(u64, devip->zsize, capacity - zstart);
5349 } else if ((zstart & (devip->zsize - 1)) == 0) {
5350 if (devip->zmodel == BLK_ZONED_HM)
5351 zsp->z_type = ZBC_ZTYPE_SWR;
5353 zsp->z_type = ZBC_ZTYPE_SWP;
5354 zsp->z_cond = ZC1_EMPTY;
5355 zsp->z_wp = zsp->z_start;
5357 min_t(u64, devip->zcap, capacity - zstart);
5359 zsp->z_type = ZBC_ZTYPE_GAP;
5360 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5361 zsp->z_wp = (sector_t)-1;
5362 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5366 WARN_ON_ONCE((int)zsp->z_size <= 0);
5367 zstart += zsp->z_size;
5373 static struct sdebug_dev_info *sdebug_device_create(
5374 struct sdebug_host_info *sdbg_host, gfp_t flags)
5376 struct sdebug_dev_info *devip;
5378 devip = kzalloc(sizeof(*devip), flags);
5380 if (sdebug_uuid_ctl == 1)
5381 uuid_gen(&devip->lu_name);
5382 else if (sdebug_uuid_ctl == 2) {
5383 if (got_shared_uuid)
5384 devip->lu_name = shared_uuid;
5386 uuid_gen(&shared_uuid);
5387 got_shared_uuid = true;
5388 devip->lu_name = shared_uuid;
5391 devip->sdbg_host = sdbg_host;
5392 if (sdeb_zbc_in_use) {
5393 devip->zmodel = sdeb_zbc_model;
5394 if (sdebug_device_create_zones(devip)) {
5399 devip->zmodel = BLK_ZONED_NONE;
5401 devip->create_ts = ktime_get_boottime();
5402 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5403 spin_lock_init(&devip->list_lock);
5404 INIT_LIST_HEAD(&devip->inject_err_list);
5405 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5410 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5412 struct sdebug_host_info *sdbg_host;
5413 struct sdebug_dev_info *open_devip = NULL;
5414 struct sdebug_dev_info *devip;
5416 sdbg_host = shost_to_sdebug_host(sdev->host);
5418 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5419 if ((devip->used) && (devip->channel == sdev->channel) &&
5420 (devip->target == sdev->id) &&
5421 (devip->lun == sdev->lun))
5424 if ((!devip->used) && (!open_devip))
5428 if (!open_devip) { /* try and make a new one */
5429 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5431 pr_err("out of memory at line %d\n", __LINE__);
5436 open_devip->channel = sdev->channel;
5437 open_devip->target = sdev->id;
5438 open_devip->lun = sdev->lun;
5439 open_devip->sdbg_host = sdbg_host;
5440 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5441 open_devip->used = true;
5445 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5448 pr_info("slave_alloc <%u %u %u %llu>\n",
5449 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5454 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5456 struct sdebug_dev_info *devip =
5457 (struct sdebug_dev_info *)sdp->hostdata;
5458 struct dentry *dentry;
5461 pr_info("slave_configure <%u %u %u %llu>\n",
5462 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5463 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5464 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5465 if (devip == NULL) {
5466 devip = find_build_dev_info(sdp);
5468 return 1; /* no resources, will be marked offline */
5470 sdp->hostdata = devip;
5472 sdp->no_uld_attach = 1;
5473 config_cdb_len(sdp);
5475 if (sdebug_allow_restart)
5476 sdp->allow_restart = 1;
5478 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5479 sdebug_debugfs_root);
5480 if (IS_ERR_OR_NULL(devip->debugfs_entry))
5481 pr_info("%s: failed to create debugfs directory for device %s\n",
5482 __func__, dev_name(&sdp->sdev_gendev));
5484 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5485 &sdebug_error_fops);
5486 if (IS_ERR_OR_NULL(dentry))
5487 pr_info("%s: failed to create error file for device %s\n",
5488 __func__, dev_name(&sdp->sdev_gendev));
5493 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5495 struct sdebug_dev_info *devip =
5496 (struct sdebug_dev_info *)sdp->hostdata;
5497 struct sdebug_err_inject *err;
5500 pr_info("slave_destroy <%u %u %u %llu>\n",
5501 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5506 spin_lock(&devip->list_lock);
5507 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5508 list_del_rcu(&err->list);
5509 call_rcu(&err->rcu, sdebug_err_free);
5511 spin_unlock(&devip->list_lock);
5513 debugfs_remove(devip->debugfs_entry);
5515 /* make this slot available for re-use */
5516 devip->used = false;
5517 sdp->hostdata = NULL;
5520 /* Returns true if we require the queued memory to be freed by the caller. */
5521 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5522 enum sdeb_defer_type defer_t)
5524 if (defer_t == SDEB_DEFER_HRT) {
5525 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5528 case 0: /* Not active, it must have already run */
5529 case -1: /* It's executing the callback */
5531 case 1: /* Was active, we've now cancelled */
5535 } else if (defer_t == SDEB_DEFER_WQ) {
5536 /* Cancel if pending */
5537 if (cancel_work_sync(&sd_dp->ew.work))
5539 /* Was not pending, so it must have run */
5541 } else if (defer_t == SDEB_DEFER_POLL) {
5549 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5551 enum sdeb_defer_type l_defer_t;
5552 struct sdebug_defer *sd_dp;
5553 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5554 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5556 lockdep_assert_held(&sdsc->lock);
5560 sd_dp = &sqcp->sd_dp;
5561 l_defer_t = READ_ONCE(sd_dp->defer_t);
5562 ASSIGN_QUEUED_CMD(cmnd, NULL);
5564 if (stop_qc_helper(sd_dp, l_defer_t))
5565 sdebug_free_queued_cmd(sqcp);
5571 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5573 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5575 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5576 unsigned long flags;
5579 spin_lock_irqsave(&sdsc->lock, flags);
5580 res = scsi_debug_stop_cmnd(cmnd);
5581 spin_unlock_irqrestore(&sdsc->lock, flags);
5587 * All we can do is set the cmnd as internally aborted and wait for it to
5588 * finish. We cannot call scsi_done() as the normal completion path may do that.
5590 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5592 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5597 /* Deletes (stops) timers or work queues of all queued commands */
5598 static void stop_all_queued(void)
5600 struct sdebug_host_info *sdhp;
5602 mutex_lock(&sdebug_host_list_mutex);
5603 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5604 struct Scsi_Host *shost = sdhp->shost;
5606 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5608 mutex_unlock(&sdebug_host_list_mutex);
5611 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5613 struct scsi_device *sdp = cmnd->device;
5614 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5615 struct sdebug_err_inject *err;
5616 unsigned char *cmd = cmnd->cmnd;
5623 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5624 if (err->type == ERR_ABORT_CMD_FAILED &&
5625 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5639 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5641 bool ok = scsi_debug_abort_cmnd(SCpnt);
5642 u8 *cmd = SCpnt->cmnd;
5647 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5648 sdev_printk(KERN_INFO, SCpnt->device,
5649 "%s: command%s found\n", __func__,
5652 if (sdebug_fail_abort(SCpnt)) {
5653 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5661 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5663 struct scsi_device *sdp = data;
5664 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5666 if (scmd->device == sdp)
5667 scsi_debug_abort_cmnd(scmd);
5672 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5673 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5675 struct Scsi_Host *shost = sdp->host;
5677 blk_mq_tagset_busy_iter(&shost->tag_set,
5678 scsi_debug_stop_all_queued_iter, sdp);
5681 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5683 struct scsi_device *sdp = cmnd->device;
5684 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5685 struct sdebug_err_inject *err;
5686 unsigned char *cmd = cmnd->cmnd;
5693 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5694 if (err->type == ERR_LUN_RESET_FAILED &&
5695 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5709 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5711 struct scsi_device *sdp = SCpnt->device;
5712 struct sdebug_dev_info *devip = sdp->hostdata;
5713 u8 *cmd = SCpnt->cmnd;
5718 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5719 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5721 scsi_debug_stop_all_queued(sdp);
5723 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5725 if (sdebug_fail_lun_reset(SCpnt)) {
5726 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5733 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5735 struct scsi_target *starget = scsi_target(cmnd->device);
5736 struct sdebug_target_info *targetip =
5737 (struct sdebug_target_info *)starget->hostdata;
5740 return targetip->reset_fail;
5745 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5747 struct scsi_device *sdp = SCpnt->device;
5748 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5749 struct sdebug_dev_info *devip;
5750 u8 *cmd = SCpnt->cmnd;
5754 ++num_target_resets;
5755 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5756 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5758 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5759 if (devip->target == sdp->id) {
5760 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5765 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5766 sdev_printk(KERN_INFO, sdp,
5767 "%s: %d device(s) found in target\n", __func__, k);
5769 if (sdebug_fail_target_reset(SCpnt)) {
5770 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5778 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5780 struct scsi_device *sdp = SCpnt->device;
5781 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5782 struct sdebug_dev_info *devip;
5787 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5788 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5790 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5791 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5795 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5796 sdev_printk(KERN_INFO, sdp,
5797 "%s: %d device(s) found in host\n", __func__, k);
5801 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5803 struct sdebug_host_info *sdbg_host;
5804 struct sdebug_dev_info *devip;
5808 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5809 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5810 mutex_lock(&sdebug_host_list_mutex);
5811 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5812 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5814 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5818 mutex_unlock(&sdebug_host_list_mutex);
5820 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5821 sdev_printk(KERN_INFO, SCpnt->device,
5822 "%s: %d device(s) found\n", __func__, k);
5826 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5828 struct msdos_partition *pp;
5829 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5830 int sectors_per_part, num_sectors, k;
5831 int heads_by_sects, start_sec, end_sec;
5833 /* assume partition table already zeroed */
5834 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5836 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5837 sdebug_num_parts = SDEBUG_MAX_PARTS;
5838 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5840 num_sectors = (int)get_sdebug_capacity();
5841 sectors_per_part = (num_sectors - sdebug_sectors_per)
5843 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5844 starts[0] = sdebug_sectors_per;
5845 max_part_secs = sectors_per_part;
5846 for (k = 1; k < sdebug_num_parts; ++k) {
5847 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5849 if (starts[k] - starts[k - 1] < max_part_secs)
5850 max_part_secs = starts[k] - starts[k - 1];
5852 starts[sdebug_num_parts] = num_sectors;
5853 starts[sdebug_num_parts + 1] = 0;
5855 ramp[510] = 0x55; /* magic partition markings */
5857 pp = (struct msdos_partition *)(ramp + 0x1be);
5858 for (k = 0; starts[k + 1]; ++k, ++pp) {
5859 start_sec = starts[k];
5860 end_sec = starts[k] + max_part_secs - 1;
5863 pp->cyl = start_sec / heads_by_sects;
5864 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5865 / sdebug_sectors_per;
5866 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5868 pp->end_cyl = end_sec / heads_by_sects;
5869 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5870 / sdebug_sectors_per;
5871 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5873 pp->start_sect = cpu_to_le32(start_sec);
5874 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5875 pp->sys_ind = 0x83; /* plain Linux partition */
5879 static void block_unblock_all_queues(bool block)
5881 struct sdebug_host_info *sdhp;
5883 lockdep_assert_held(&sdebug_host_list_mutex);
5885 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5886 struct Scsi_Host *shost = sdhp->shost;
5889 scsi_block_requests(shost);
5891 scsi_unblock_requests(shost);
5895 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5896 * commands will be processed normally before triggers occur.
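/* E.g. with every_nth=100 and sdebug_cmnd_count at 257, the count is
 * rounded down to 200, so 99 more commands complete normally before
 * the next multiple-of-100 trigger. */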
5898 static void tweak_cmnd_count(void)
5902 modulo = abs(sdebug_every_nth);
5906 mutex_lock(&sdebug_host_list_mutex);
5907 block_unblock_all_queues(true);
5908 count = atomic_read(&sdebug_cmnd_count);
5909 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5910 block_unblock_all_queues(false);
5911 mutex_unlock(&sdebug_host_list_mutex);
5914 static void clear_queue_stats(void)
5916 atomic_set(&sdebug_cmnd_count, 0);
5917 atomic_set(&sdebug_completions, 0);
5918 atomic_set(&sdebug_miss_cpus, 0);
5919 atomic_set(&sdebug_a_tsf, 0);
5922 static bool inject_on_this_cmd(void)
5924 if (sdebug_every_nth == 0)
5926 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5929 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5932 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5935 kmem_cache_free(queued_cmd_cache, sqcp);
5938 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5940 struct sdebug_queued_cmd *sqcp;
5941 struct sdebug_defer *sd_dp;
5943 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5947 sd_dp = &sqcp->sd_dp;
5949 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5950 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5951 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5958 /* Complete the processing of the thread that queued a SCSI command to this
5959 * driver. It either completes the command by calling scsi_done() directly or
5960 * schedules a high-resolution timer or work-queue item to do so, then returns
5961 * 0. Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5963 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5965 int (*pfp)(struct scsi_cmnd *,
5966 struct sdebug_dev_info *),
5967 int delta_jiff, int ndelay)
5969 struct request *rq = scsi_cmd_to_rq(cmnd);
5970 bool polled = rq->cmd_flags & REQ_POLLED;
5971 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5972 unsigned long flags;
5973 u64 ns_from_boot = 0;
5974 struct sdebug_queued_cmd *sqcp;
5975 struct scsi_device *sdp;
5976 struct sdebug_defer *sd_dp;
5978 if (unlikely(devip == NULL)) {
5979 if (scsi_result == 0)
5980 scsi_result = DID_NO_CONNECT << 16;
5981 goto respond_in_thread;
5985 if (delta_jiff == 0)
5986 goto respond_in_thread;
5989 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5990 (scsi_result == 0))) {
5991 int num_in_q = scsi_device_busy(sdp);
5992 int qdepth = cmnd->device->queue_depth;
5994 if ((num_in_q == qdepth) &&
5995 (atomic_inc_return(&sdebug_a_tsf) >=
5996 abs(sdebug_every_nth))) {
5997 atomic_set(&sdebug_a_tsf, 0);
5998 scsi_result = device_qfull_result;
6000 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6001 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6002 __func__, num_in_q);
6006 sqcp = sdebug_alloc_queued_cmd(cmnd);
6008 pr_err("%s no alloc\n", __func__);
6009 return SCSI_MLQUEUE_HOST_BUSY;
6011 sd_dp = &sqcp->sd_dp;
6014 ns_from_boot = ktime_get_boottime_ns();
6016 /* one of the resp_*() response functions is called here */
6017 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6018 if (cmnd->result & SDEG_RES_IMMED_MASK) {
6019 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6020 delta_jiff = ndelay = 0;
6022 if (cmnd->result == 0 && scsi_result != 0)
6023 cmnd->result = scsi_result;
6024 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6025 if (atomic_read(&sdeb_inject_pending)) {
6026 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6027 atomic_set(&sdeb_inject_pending, 0);
6028 cmnd->result = check_condition_result;
6032 if (unlikely(sdebug_verbose && cmnd->result))
6033 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6034 __func__, cmnd->result);
6036 if (delta_jiff > 0 || ndelay > 0) {
6039 if (delta_jiff > 0) {
6040 u64 ns = jiffies_to_nsecs(delta_jiff);
6042 if (sdebug_random && ns < U32_MAX) {
6043 ns = get_random_u32_below((u32)ns);
6044 } else if (sdebug_random) {
6045 ns >>= 12; /* scale to 4 usec precision */
6046 if (ns < U32_MAX) /* over 4 hours max */
6047 ns = get_random_u32_below((u32)ns);
6050 kt = ns_to_ktime(ns);
6051 } else { /* ndelay has a 4.2 second max */
6052 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6054 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6055 u64 d = ktime_get_boottime_ns() - ns_from_boot;
6057 if (kt <= d) { /* elapsed duration >= kt */
6058 /* call scsi_done() from this thread */
6059 sdebug_free_queued_cmd(sqcp);
6063 /* otherwise reduce kt by elapsed time */
6067 if (sdebug_statistics)
6068 sd_dp->issuing_cpu = raw_smp_processor_id();
6070 spin_lock_irqsave(&sdsc->lock, flags);
6071 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6072 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6073 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6074 spin_unlock_irqrestore(&sdsc->lock, flags);
6076 /* schedule the invocation of scsi_done() for a later time */
6077 spin_lock_irqsave(&sdsc->lock, flags);
6078 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6079 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6080 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6082 * The completion handler will try to grab sqcp->lock,
6083 * so there is no chance that the completion handler
6084 * will call scsi_done() until we release the lock
6085 * here (so ok to keep referencing sdsc).
6087 spin_unlock_irqrestore(&sdsc->lock, flags);
6089 } else { /* jdelay < 0, use work queue */
6090 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6091 atomic_read(&sdeb_inject_pending))) {
6092 sd_dp->aborted = true;
6093 atomic_set(&sdeb_inject_pending, 0);
6094 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6095 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6098 if (sdebug_statistics)
6099 sd_dp->issuing_cpu = raw_smp_processor_id();
6101 spin_lock_irqsave(&sdsc->lock, flags);
6102 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6103 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6104 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6105 spin_unlock_irqrestore(&sdsc->lock, flags);
6107 spin_lock_irqsave(&sdsc->lock, flags);
6108 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6109 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6110 schedule_work(&sd_dp->ew.work);
6111 spin_unlock_irqrestore(&sdsc->lock, flags);
6117 respond_in_thread: /* call back to mid-layer using invocation thread */
6118 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6119 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6120 if (cmnd->result == 0 && scsi_result != 0)
6121 cmnd->result = scsi_result;
6126 /* Note: The following macros create attribute files in the
6127 /sys/module/scsi_debug/parameters directory. Unfortunately this
6128 driver is not notified when one of them is changed, so it cannot
6129 trigger auxiliary actions as it can when the corresponding attribute
6130 in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
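/*
 * Example usage (illustrative): most of these can be given at load
 * time, e.g. "modprobe scsi_debug dev_size_mb=256 num_tgts=2
 * max_luns=4", and the writable ones can later be changed through
 * /sys/bus/pseudo/drivers/scsi_debug/ (preferred, since the driver
 * then reacts to the change) or through
 * /sys/module/scsi_debug/parameters/ (no notification, as noted
 * above).
 */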
6132 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6133 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6134 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6135 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6136 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6137 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6138 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6139 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6140 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6141 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6142 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6143 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6144 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6145 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6146 module_param_string(inq_product, sdebug_inq_product_id,
6147 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6148 module_param_string(inq_rev, sdebug_inq_product_rev,
6149 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6150 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6151 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6152 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6153 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6154 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6155 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6156 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6157 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6158 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6159 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6160 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6162 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6164 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6165 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6166 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6167 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6168 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6169 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6170 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6171 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6172 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6173 module_param_named(per_host_store, sdebug_per_host_store, bool,
6175 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6176 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6177 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6178 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6179 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6180 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6181 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6182 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6183 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6184 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6185 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6186 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6187 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6188 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6189 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6190 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6191 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6192 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6194 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6195 module_param_named(write_same_length, sdebug_write_same_length, int,
6197 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6198 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6199 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6200 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6201 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6202 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6204 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6205 MODULE_DESCRIPTION("SCSI debug adapter driver");
6206 MODULE_LICENSE("GPL");
6207 MODULE_VERSION(SDEBUG_VERSION);
6209 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6210 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6211 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6212 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6213 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6214 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6215 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6216 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6217 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6218 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6219 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6220 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6221 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6222 MODULE_PARM_DESC(host_max_queue,
6223 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6224 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6225 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6226 SDEBUG_VERSION "\")");
6227 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6228 MODULE_PARM_DESC(lbprz,
6229 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6230 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6231 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6232 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6233 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6234 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
6235 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6236 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6237 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors on which to return a MEDIUM error");
6238 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6239 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6240 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6241 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6242 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6243 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6244 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6245 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6246 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6247 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6248 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6249 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6250 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6251 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6252 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6253 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6254 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6255 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6256 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6257 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6258 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6259 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6260 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6261 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6262 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
6263 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6264 MODULE_PARM_DESC(uuid_ctl,
6265 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6266 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6267 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6268 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6269 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6270 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6271 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6272 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6273 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6274 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6275 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
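/* Sketch of a load line for a host-managed zoned disk (parameter values
 * below are illustrative, not defaults):
 *     modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=4 \
 *              dev_size_mb=1024 */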
6277 #define SDEBUG_INFO_LEN 256
6278 static char sdebug_info[SDEBUG_INFO_LEN];
6280 static const char *scsi_debug_info(struct Scsi_Host *shp)
6284 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6285 my_name, SDEBUG_VERSION, sdebug_version_date);
6286 if (k >= (SDEBUG_INFO_LEN - 1))
6288 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6289 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6290 sdebug_dev_size_mb, sdebug_opts, submit_queues,
6291 "statistics", (int)sdebug_statistics);
6295 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
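/* For example (host_id of 0 assumed): "echo 1 > /proc/scsi/scsi_debug/0"
 * sets opts to 1 (SDEBUG_OPT_NOISE), enabling verbose command logging. */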
6296 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6301 int minLen = length > 15 ? 15 : length;
6303 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6305 memcpy(arr, buffer, minLen);
6307 if (1 != sscanf(arr, "%d", &opts))
6310 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6311 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6312 if (sdebug_every_nth != 0)
6317 struct sdebug_submit_queue_data {
6323 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6325 struct sdebug_submit_queue_data *data = opaque;
6326 u32 unique_tag = blk_mq_unique_tag(rq);
6327 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6328 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6329 int queue_num = data->queue_num;
6331 if (hwq != queue_num)
6334 /* Rely on iterating in ascending tag order */
6335 if (*data->first == -1)
6336 *data->first = *data->last = tag;
6343 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6344 * same for each scsi_debug host (if more than one). Some of the counters
6345 * in the output are not atomic, so they may be inaccurate on a busy system. */
6346 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6348 struct sdebug_host_info *sdhp;
6351 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6352 SDEBUG_VERSION, sdebug_version_date);
6353 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6354 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6355 sdebug_opts, sdebug_every_nth);
6356 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6357 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6358 sdebug_sector_size, "bytes");
6359 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6360 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6362 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6363 num_dev_resets, num_target_resets, num_bus_resets,
6365 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6366 dix_reads, dix_writes, dif_errors);
6367 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6369 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6370 atomic_read(&sdebug_cmnd_count),
6371 atomic_read(&sdebug_completions),
6372 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6373 atomic_read(&sdebug_a_tsf),
6374 atomic_read(&sdeb_mq_poll_count));
6376 seq_printf(m, "submit_queues=%d\n", submit_queues);
6377 for (j = 0; j < submit_queues; ++j) {
6379 struct sdebug_submit_queue_data data = {
6384 seq_printf(m, " queue %d:\n", j);
6385 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6388 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6389 "first,last bits", f, l);
6393 seq_printf(m, "this host_no=%d\n", host->host_no);
6394 if (!xa_empty(per_store_ap)) {
6397 unsigned long l_idx;
6398 struct sdeb_store_info *sip;
6400 seq_puts(m, "\nhost list:\n");
6402 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6404 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6405 sdhp->shost->host_no, idx);
6408 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6409 sdeb_most_recent_idx);
6411 xa_for_each(per_store_ap, l_idx, sip) {
6412 niu = xa_get_mark(per_store_ap, l_idx,
6413 SDEB_XA_NOT_IN_USE);
6415 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6416 (niu ? " not_in_use" : ""));
6423 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6425 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6427 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6428 * of delay is jiffies.
6430 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6435 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6437 if (sdebug_jdelay != jdelay) {
6438 struct sdebug_host_info *sdhp;
6440 mutex_lock(&sdebug_host_list_mutex);
6441 block_unblock_all_queues(true);
6443 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6444 struct Scsi_Host *shost = sdhp->shost;
6446 if (scsi_host_busy(shost)) {
6447 res = -EBUSY; /* queued commands */
6452 sdebug_jdelay = jdelay;
6455 block_unblock_all_queues(false);
6456 mutex_unlock(&sdebug_host_list_mutex);
6462 static DRIVER_ATTR_RW(delay);
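/* Runtime usage sketch (value illustrative):
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * delays each response by 2 jiffies; per delay_store() above, the write
 * fails with EBUSY while any scsi_debug host has queued commands. */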
6464 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6466 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6468 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6469 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6470 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6475 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6476 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6478 if (sdebug_ndelay != ndelay) {
6479 struct sdebug_host_info *sdhp;
6481 mutex_lock(&sdebug_host_list_mutex);
6482 block_unblock_all_queues(true);
6484 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6485 struct Scsi_Host *shost = sdhp->shost;
6487 if (scsi_host_busy(shost)) {
6488 res = -EBUSY; /* queued commands */
6494 sdebug_ndelay = ndelay;
6495 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6498 block_unblock_all_queues(false);
6499 mutex_unlock(&sdebug_host_list_mutex);
6505 static DRIVER_ATTR_RW(ndelay);
6507 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6509 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6512 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6518 if (sscanf(buf, "%10s", work) == 1) {
6519 if (strncasecmp(work, "0x", 2) == 0) {
6520 if (kstrtoint(work + 2, 16, &opts) == 0)
6523 if (kstrtoint(work, 10, &opts) == 0)
6530 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6531 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6535 static DRIVER_ATTR_RW(opts);
6537 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6539 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6541 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6546 /* Cannot change from or to TYPE_ZBC with sysfs */
6547 if (sdebug_ptype == TYPE_ZBC)
6550 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6558 static DRIVER_ATTR_RW(ptype);
6560 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6562 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6564 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6569 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6575 static DRIVER_ATTR_RW(dsense);
6577 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6579 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6581 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6586 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6587 bool want_store = (n == 0);
6588 struct sdebug_host_info *sdhp;
6591 sdebug_fake_rw = (sdebug_fake_rw > 0);
6592 if (sdebug_fake_rw == n)
6593 return count; /* not transitioning so do nothing */
6595 if (want_store) { /* 1 --> 0 transition, set up store */
6596 if (sdeb_first_idx < 0) {
6597 idx = sdebug_add_store();
6601 idx = sdeb_first_idx;
6602 xa_clear_mark(per_store_ap, idx,
6603 SDEB_XA_NOT_IN_USE);
6605 /* make all hosts use same store */
6606 list_for_each_entry(sdhp, &sdebug_host_list,
6608 if (sdhp->si_idx != idx) {
6609 xa_set_mark(per_store_ap, sdhp->si_idx,
6610 SDEB_XA_NOT_IN_USE);
6614 sdeb_most_recent_idx = idx;
6615 } else { /* 0 --> 1 transition is trigger for shrink */
6616 sdebug_erase_all_stores(true /* apart from first */);
6623 static DRIVER_ATTR_RW(fake_rw);
6625 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6627 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6629 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6634 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6635 sdebug_no_lun_0 = n;
6640 static DRIVER_ATTR_RW(no_lun_0);
6642 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6644 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6646 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6651 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6652 sdebug_num_tgts = n;
6653 sdebug_max_tgts_luns();
6658 static DRIVER_ATTR_RW(num_tgts);
6660 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6662 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6664 static DRIVER_ATTR_RO(dev_size_mb);
6666 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6668 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6671 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6676 if (kstrtobool(buf, &v))
6679 sdebug_per_host_store = v;
6682 static DRIVER_ATTR_RW(per_host_store);
6684 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6686 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6688 static DRIVER_ATTR_RO(num_parts);
6690 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6692 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6694 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6700 if (sscanf(buf, "%10s", work) == 1) {
6701 if (strncasecmp(work, "0x", 2) == 0) {
6702 if (kstrtoint(work + 2, 16, &nth) == 0)
6703 goto every_nth_done;
6705 if (kstrtoint(work, 10, &nth) == 0)
6706 goto every_nth_done;
6712 sdebug_every_nth = nth;
6713 if (nth && !sdebug_statistics) {
6714 pr_info("every_nth needs statistics=1, set it\n");
6715 sdebug_statistics = true;
6720 static DRIVER_ATTR_RW(every_nth);
6722 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6724 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6726 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6732 if (kstrtoint(buf, 0, &n))
6735 if (n > (int)SAM_LUN_AM_FLAT) {
6736 pr_warn("only LUN address methods 0 and 1 are supported\n");
6739 changed = ((int)sdebug_lun_am != n);
6741 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6742 struct sdebug_host_info *sdhp;
6743 struct sdebug_dev_info *dp;
6745 mutex_lock(&sdebug_host_list_mutex);
6746 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6747 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6748 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6751 mutex_unlock(&sdebug_host_list_mutex);
6757 static DRIVER_ATTR_RW(lun_format);
6759 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6761 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6763 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6769 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6771 pr_warn("max_luns can be no more than 256\n");
6774 changed = (sdebug_max_luns != n);
6775 sdebug_max_luns = n;
6776 sdebug_max_tgts_luns();
6777 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6778 struct sdebug_host_info *sdhp;
6779 struct sdebug_dev_info *dp;
6781 mutex_lock(&sdebug_host_list_mutex);
6782 list_for_each_entry(sdhp, &sdebug_host_list,
6784 list_for_each_entry(dp, &sdhp->dev_info_list,
6786 set_bit(SDEBUG_UA_LUNS_CHANGED,
6790 mutex_unlock(&sdebug_host_list_mutex);
6796 static DRIVER_ATTR_RW(max_luns);
6798 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6800 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6802 /* N.B. max_queue can be changed while there are queued commands. In-flight
6803 * commands beyond the new max_queue will still be completed. */
6804 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6809 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6810 (n <= SDEBUG_CANQUEUE) &&
6811 (sdebug_host_max_queue == 0)) {
6812 mutex_lock(&sdebug_host_list_mutex);
6814 /* We may only change sdebug_max_queue when we have no shosts */
6815 if (list_empty(&sdebug_host_list))
6816 sdebug_max_queue = n;
6819 mutex_unlock(&sdebug_host_list_mutex);
6824 static DRIVER_ATTR_RW(max_queue);
6826 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6828 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6831 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6833 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6836 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6840 if (kstrtobool(buf, &v))
6843 sdebug_no_rwlock = v;
6846 static DRIVER_ATTR_RW(no_rwlock);
6849 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6850 * in range [0, sdebug_host_max_queue), we can't change it.
6852 static DRIVER_ATTR_RO(host_max_queue);
6854 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6856 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6858 static DRIVER_ATTR_RO(no_uld);
6860 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6862 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6864 static DRIVER_ATTR_RO(scsi_level);
6866 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6868 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6870 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6876 /* Ignore capacity change for ZBC drives for now */
6877 if (sdeb_zbc_in_use)
6880 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6881 changed = (sdebug_virtual_gb != n);
6882 sdebug_virtual_gb = n;
6883 sdebug_capacity = get_sdebug_capacity();
6885 struct sdebug_host_info *sdhp;
6886 struct sdebug_dev_info *dp;
6888 mutex_lock(&sdebug_host_list_mutex);
6889 list_for_each_entry(sdhp, &sdebug_host_list,
6891 list_for_each_entry(dp, &sdhp->dev_info_list,
6893 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6897 mutex_unlock(&sdebug_host_list_mutex);
6903 static DRIVER_ATTR_RW(virtual_gb);
6905 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6907 /* absolute number of hosts currently active is what is shown */
6908 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6911 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6916 struct sdeb_store_info *sip;
6917 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6920 if (sscanf(buf, "%d", &delta_hosts) != 1)
6922 if (delta_hosts > 0) {
6926 xa_for_each_marked(per_store_ap, idx, sip,
6927 SDEB_XA_NOT_IN_USE) {
6928 sdeb_most_recent_idx = (int)idx;
6932 if (found) /* re-use case */
6933 sdebug_add_host_helper((int)idx);
6935 sdebug_do_add_host(true);
6937 sdebug_do_add_host(false);
6939 } while (--delta_hosts);
6940 } else if (delta_hosts < 0) {
6942 sdebug_do_remove_host(false);
6943 } while (++delta_hosts);
6947 static DRIVER_ATTR_RW(add_host);
6949 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6951 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6953 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6958 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6959 sdebug_vpd_use_hostno = n;
6964 static DRIVER_ATTR_RW(vpd_use_hostno);
6966 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6968 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6970 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6975 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6977 sdebug_statistics = true;
6979 clear_queue_stats();
6980 sdebug_statistics = false;
6986 static DRIVER_ATTR_RW(statistics);
6988 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6990 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6992 static DRIVER_ATTR_RO(sector_size);
6994 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6996 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6998 static DRIVER_ATTR_RO(submit_queues);
7000 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7002 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7004 static DRIVER_ATTR_RO(dix);
7006 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7008 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7010 static DRIVER_ATTR_RO(dif);
7012 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7014 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7016 static DRIVER_ATTR_RO(guard);
7018 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7020 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7022 static DRIVER_ATTR_RO(ato);
7024 static ssize_t map_show(struct device_driver *ddp, char *buf)
7028 if (!scsi_debug_lbp())
7029 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7030 sdebug_store_sectors);
7032 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7033 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7036 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7037 (int)map_size, sip->map_storep);
7039 buf[count++] = '\n';
7044 static DRIVER_ATTR_RO(map);
7046 static ssize_t random_show(struct device_driver *ddp, char *buf)
7048 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7051 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7056 if (kstrtobool(buf, &v))
7062 static DRIVER_ATTR_RW(random);
7064 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7066 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7068 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7073 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7074 sdebug_removable = (n > 0);
7079 static DRIVER_ATTR_RW(removable);
7081 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7083 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7085 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7086 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7091 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7092 sdebug_host_lock = (n > 0);
7097 static DRIVER_ATTR_RW(host_lock);
7099 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7101 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7103 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7108 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7109 sdebug_strict = (n > 0);
7114 static DRIVER_ATTR_RW(strict);
7116 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7118 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7120 static DRIVER_ATTR_RO(uuid_ctl);
7122 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7124 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7126 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7131 ret = kstrtoint(buf, 0, &n);
7135 all_config_cdb_len();
7138 static DRIVER_ATTR_RW(cdb_len);
7140 static const char * const zbc_model_strs_a[] = {
7141 [BLK_ZONED_NONE] = "none",
7142 [BLK_ZONED_HA] = "host-aware",
7143 [BLK_ZONED_HM] = "host-managed",
7146 static const char * const zbc_model_strs_b[] = {
7147 [BLK_ZONED_NONE] = "no",
7148 [BLK_ZONED_HA] = "aware",
7149 [BLK_ZONED_HM] = "managed",
7152 static const char * const zbc_model_strs_c[] = {
7153 [BLK_ZONED_NONE] = "0",
7154 [BLK_ZONED_HA] = "1",
7155 [BLK_ZONED_HM] = "2",
7158 static int sdeb_zbc_model_str(const char *cp)
7160 int res = sysfs_match_string(zbc_model_strs_a, cp);
7163 res = sysfs_match_string(zbc_model_strs_b, cp);
7165 res = sysfs_match_string(zbc_model_strs_c, cp);
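/* Hence "host-managed", "managed" and "2" all select BLK_ZONED_HM, while
 * a string matching none of the three tables above is rejected. */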
7173 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7175 return scnprintf(buf, PAGE_SIZE, "%s\n",
7176 zbc_model_strs_a[sdeb_zbc_model]);
7178 static DRIVER_ATTR_RO(zbc);
7180 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7182 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7184 static DRIVER_ATTR_RO(tur_ms_to_ready);
7186 /* Note: The following array creates attribute files in the
7187 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7188 files (over those found in the /sys/module/scsi_debug/parameters
7189 directory) is that auxiliary actions can be triggered when an attribute
7190 is changed. For example see: add_host_store() above.
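/* For instance (value illustrative), "echo 4 >
 * /sys/bus/pseudo/drivers/scsi_debug/max_luns" resizes every host via
 * sdebug_max_tgts_luns() and, at SPC-3 or later, raises a LUNS_CHANGED
 * unit attention; the module parameter of the same name can do neither. */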
7193 static struct attribute *sdebug_drv_attrs[] = {
7194 &driver_attr_delay.attr,
7195 &driver_attr_opts.attr,
7196 &driver_attr_ptype.attr,
7197 &driver_attr_dsense.attr,
7198 &driver_attr_fake_rw.attr,
7199 &driver_attr_host_max_queue.attr,
7200 &driver_attr_no_lun_0.attr,
7201 &driver_attr_num_tgts.attr,
7202 &driver_attr_dev_size_mb.attr,
7203 &driver_attr_num_parts.attr,
7204 &driver_attr_every_nth.attr,
7205 &driver_attr_lun_format.attr,
7206 &driver_attr_max_luns.attr,
7207 &driver_attr_max_queue.attr,
7208 &driver_attr_no_rwlock.attr,
7209 &driver_attr_no_uld.attr,
7210 &driver_attr_scsi_level.attr,
7211 &driver_attr_virtual_gb.attr,
7212 &driver_attr_add_host.attr,
7213 &driver_attr_per_host_store.attr,
7214 &driver_attr_vpd_use_hostno.attr,
7215 &driver_attr_sector_size.attr,
7216 &driver_attr_statistics.attr,
7217 &driver_attr_submit_queues.attr,
7218 &driver_attr_dix.attr,
7219 &driver_attr_dif.attr,
7220 &driver_attr_guard.attr,
7221 &driver_attr_ato.attr,
7222 &driver_attr_map.attr,
7223 &driver_attr_random.attr,
7224 &driver_attr_removable.attr,
7225 &driver_attr_host_lock.attr,
7226 &driver_attr_ndelay.attr,
7227 &driver_attr_strict.attr,
7228 &driver_attr_uuid_ctl.attr,
7229 &driver_attr_cdb_len.attr,
7230 &driver_attr_tur_ms_to_ready.attr,
7231 &driver_attr_zbc.attr,
7234 ATTRIBUTE_GROUPS(sdebug_drv);
7236 static struct device *pseudo_primary;
7238 static int __init scsi_debug_init(void)
7240 bool want_store = (sdebug_fake_rw == 0);
7242 int k, ret, hosts_to_add;
7245 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7246 pr_warn("ndelay must be less than 1 second, ignored\n");
7248 } else if (sdebug_ndelay > 0)
7249 sdebug_jdelay = JDELAY_OVERRIDDEN;
7251 switch (sdebug_sector_size) {
7258 pr_err("invalid sector_size %d\n", sdebug_sector_size);
7262 switch (sdebug_dif) {
7263 case T10_PI_TYPE0_PROTECTION:
7265 case T10_PI_TYPE1_PROTECTION:
7266 case T10_PI_TYPE2_PROTECTION:
7267 case T10_PI_TYPE3_PROTECTION:
7268 have_dif_prot = true;
7272 pr_err("dif must be 0, 1, 2 or 3\n");
7276 if (sdebug_num_tgts < 0) {
7277 pr_err("num_tgts must be >= 0\n");
7281 if (sdebug_guard > 1) {
7282 pr_err("guard must be 0 or 1\n");
7286 if (sdebug_ato > 1) {
7287 pr_err("ato must be 0 or 1\n");
7291 if (sdebug_physblk_exp > 15) {
7292 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7296 sdebug_lun_am = sdebug_lun_am_i;
7297 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7298 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7299 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7302 if (sdebug_max_luns > 256) {
7303 if (sdebug_max_luns > 16384) {
7304 pr_warn("max_luns can be no more than 16384, use default\n");
7305 sdebug_max_luns = DEF_MAX_LUNS;
7307 sdebug_lun_am = SAM_LUN_AM_FLAT;
7310 if (sdebug_lowest_aligned > 0x3fff) {
7311 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7315 if (submit_queues < 1) {
7316 pr_err("submit_queues must be 1 or more\n");
7320 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7321 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7325 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7326 (sdebug_host_max_queue < 0)) {
7327 pr_err("host_max_queue must be in range [0 %d]\n",
7332 if (sdebug_host_max_queue &&
7333 (sdebug_max_queue != sdebug_host_max_queue)) {
7334 sdebug_max_queue = sdebug_host_max_queue;
7335 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7340 * check for host managed zoned block device specified with
7341 * ptype=0x14 or zbc=XXX.
7343 if (sdebug_ptype == TYPE_ZBC) {
7344 sdeb_zbc_model = BLK_ZONED_HM;
7345 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7346 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7350 switch (sdeb_zbc_model) {
7351 case BLK_ZONED_NONE:
7353 sdebug_ptype = TYPE_DISK;
7356 sdebug_ptype = TYPE_ZBC;
7359 pr_err("Invalid ZBC model\n");
7363 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7364 sdeb_zbc_in_use = true;
7365 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7366 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7369 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7370 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7371 if (sdebug_dev_size_mb < 1)
7372 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7373 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7374 sdebug_store_sectors = sz / sdebug_sector_size;
7375 sdebug_capacity = get_sdebug_capacity();
7377 /* play around with geometry, don't waste too much on track 0 */
7379 sdebug_sectors_per = 32;
7380 if (sdebug_dev_size_mb >= 256)
7382 else if (sdebug_dev_size_mb >= 16)
7384 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7385 (sdebug_sectors_per * sdebug_heads);
7386 if (sdebug_cylinders_per >= 1024) {
7387 /* other LLDs do this; implies >= 1GB ram disk ... */
7389 sdebug_sectors_per = 63;
7390 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7391 (sdebug_sectors_per * sdebug_heads);
7393 if (scsi_debug_lbp()) {
7394 sdebug_unmap_max_blocks =
7395 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7397 sdebug_unmap_max_desc =
7398 clamp(sdebug_unmap_max_desc, 0U, 256U);
7400 sdebug_unmap_granularity =
7401 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7403 if (sdebug_unmap_alignment &&
7404 sdebug_unmap_granularity <=
7405 sdebug_unmap_alignment) {
7406 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7410 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7412 idx = sdebug_add_store();
7417 pseudo_primary = root_device_register("pseudo_0");
7418 if (IS_ERR(pseudo_primary)) {
7419 pr_warn("root_device_register() error\n");
7420 ret = PTR_ERR(pseudo_primary);
7423 ret = bus_register(&pseudo_lld_bus);
7425 pr_warn("bus_register error: %d\n", ret);
7428 ret = driver_register(&sdebug_driverfs_driver);
7430 pr_warn("driver_register error: %d\n", ret);
7434 hosts_to_add = sdebug_add_host;
7435 sdebug_add_host = 0;
7437 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7438 if (!queued_cmd_cache) {
7443 sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7444 if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7445 pr_info("%s: failed to create initial debugfs directory\n", __func__);
7447 for (k = 0; k < hosts_to_add; k++) {
7448 if (want_store && k == 0) {
7449 ret = sdebug_add_host_helper(idx);
7451 pr_err("add_host_helper k=%d, error=%d\n",
7456 ret = sdebug_do_add_host(want_store &&
7457 sdebug_per_host_store);
7459 pr_err("add_host k=%d error=%d\n", k, -ret);
7465 pr_info("built %d host(s)\n", sdebug_num_hosts);
7470 driver_unregister(&sdebug_driverfs_driver);
7472 bus_unregister(&pseudo_lld_bus);
7474 root_device_unregister(pseudo_primary);
7476 sdebug_erase_store(idx, NULL);
7480 static void __exit scsi_debug_exit(void)
7482 int k = sdebug_num_hosts;
7485 sdebug_do_remove_host(true);
7486 kmem_cache_destroy(queued_cmd_cache);
7487 driver_unregister(&sdebug_driverfs_driver);
7488 bus_unregister(&pseudo_lld_bus);
7489 root_device_unregister(pseudo_primary);
7491 sdebug_erase_all_stores(false);
7492 xa_destroy(per_store_ap);
7493 debugfs_remove(sdebug_debugfs_root);
7496 device_initcall(scsi_debug_init);
7497 module_exit(scsi_debug_exit);
7499 static void sdebug_release_adapter(struct device *dev)
7501 struct sdebug_host_info *sdbg_host;
7503 sdbg_host = dev_to_sdebug_host(dev);
7507 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7508 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7513 if (xa_empty(per_store_ap))
7515 sip = xa_load(per_store_ap, idx);
7519 vfree(sip->map_storep);
7520 vfree(sip->dif_storep);
7522 xa_erase(per_store_ap, idx);
7526 /* Assume apart_from_first==false only in shutdown case. */
7527 static void sdebug_erase_all_stores(bool apart_from_first)
7530 struct sdeb_store_info *sip = NULL;
7532 xa_for_each(per_store_ap, idx, sip) {
7533 if (apart_from_first)
7534 apart_from_first = false;
7536 sdebug_erase_store(idx, sip);
7538 if (apart_from_first)
7539 sdeb_most_recent_idx = sdeb_first_idx;
7543 * Returns store xarray new element index (idx) if >=0 else negated errno.
7544 * Limit the number of stores to 65536.
7546 static int sdebug_add_store(void)
7550 unsigned long iflags;
7551 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7552 struct sdeb_store_info *sip = NULL;
7553 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7555 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7559 xa_lock_irqsave(per_store_ap, iflags);
7560 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7561 if (unlikely(res < 0)) {
7562 xa_unlock_irqrestore(per_store_ap, iflags);
7564 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7567 sdeb_most_recent_idx = n_idx;
7568 if (sdeb_first_idx < 0)
7569 sdeb_first_idx = n_idx;
7570 xa_unlock_irqrestore(per_store_ap, iflags);
7573 sip->storep = vzalloc(sz);
7575 pr_err("user data oom\n");
7578 if (sdebug_num_parts > 0)
7579 sdebug_build_parts(sip->storep, sz);
7581 /* DIF/DIX: what T10 calls Protection Information (PI) */
7585 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7586 sip->dif_storep = vmalloc(dif_size);
7588 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7591 if (!sip->dif_storep) {
7592 pr_err("DIX oom\n");
7595 memset(sip->dif_storep, 0xff, dif_size);
7597 /* Logical Block Provisioning */
7598 if (scsi_debug_lbp()) {
7599 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7600 sip->map_storep = vmalloc(array_size(sizeof(long),
7601 BITS_TO_LONGS(map_size)));
7603 pr_info("%lu provisioning blocks\n", map_size);
7605 if (!sip->map_storep) {
7606 pr_err("LBP map oom\n");
7610 bitmap_zero(sip->map_storep, map_size);
7612 /* Map first 1KB for partition table */
7613 if (sdebug_num_parts)
7614 map_region(sip, 0, 2);
7617 rwlock_init(&sip->macc_lck);
7620 sdebug_erase_store((int)n_idx, sip);
7621 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7625 static int sdebug_add_host_helper(int per_host_idx)
7627 int k, devs_per_host, idx;
7628 int error = -ENOMEM;
7629 struct sdebug_host_info *sdbg_host;
7630 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7632 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7635 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7636 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7637 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7638 sdbg_host->si_idx = idx;
7640 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7642 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7643 for (k = 0; k < devs_per_host; k++) {
7644 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7649 mutex_lock(&sdebug_host_list_mutex);
7650 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7651 mutex_unlock(&sdebug_host_list_mutex);
7653 sdbg_host->dev.bus = &pseudo_lld_bus;
7654 sdbg_host->dev.parent = pseudo_primary;
7655 sdbg_host->dev.release = &sdebug_release_adapter;
7656 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7658 error = device_register(&sdbg_host->dev);
7660 mutex_lock(&sdebug_host_list_mutex);
7661 list_del(&sdbg_host->host_list);
7662 mutex_unlock(&sdebug_host_list_mutex);
7670 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7672 list_del(&sdbg_devinfo->dev_list);
7673 kfree(sdbg_devinfo->zstate);
7674 kfree(sdbg_devinfo);
7676 if (sdbg_host->dev.release)
7677 put_device(&sdbg_host->dev);
7680 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7684 static int sdebug_do_add_host(bool mk_new_store)
7686 int ph_idx = sdeb_most_recent_idx;
7689 ph_idx = sdebug_add_store();
7693 return sdebug_add_host_helper(ph_idx);
7696 static void sdebug_do_remove_host(bool the_end)
7699 struct sdebug_host_info *sdbg_host = NULL;
7700 struct sdebug_host_info *sdbg_host2;
7702 mutex_lock(&sdebug_host_list_mutex);
7703 if (!list_empty(&sdebug_host_list)) {
7704 sdbg_host = list_entry(sdebug_host_list.prev,
7705 struct sdebug_host_info, host_list);
7706 idx = sdbg_host->si_idx;
7708 if (!the_end && idx >= 0) {
7711 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7712 if (sdbg_host2 == sdbg_host)
7714 if (idx == sdbg_host2->si_idx) {
7720 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7721 if (idx == sdeb_most_recent_idx)
7722 --sdeb_most_recent_idx;
7726 list_del(&sdbg_host->host_list);
7727 mutex_unlock(&sdebug_host_list_mutex);
7732 device_unregister(&sdbg_host->dev);
7736 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7738 struct sdebug_dev_info *devip = sdev->hostdata;
7743 mutex_lock(&sdebug_host_list_mutex);
7744 block_unblock_all_queues(true);
7746 if (qdepth > SDEBUG_CANQUEUE) {
7747 qdepth = SDEBUG_CANQUEUE;
7748 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7749 qdepth, SDEBUG_CANQUEUE);
7753 if (qdepth != sdev->queue_depth)
7754 scsi_change_queue_depth(sdev, qdepth);
7756 block_unblock_all_queues(false);
7757 mutex_unlock(&sdebug_host_list_mutex);
7759 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7760 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7762 return sdev->queue_depth;
7765 static bool fake_timeout(struct scsi_cmnd *scp)
7767 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7768 if (sdebug_every_nth < -1)
7769 sdebug_every_nth = -1;
7770 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7771 return true; /* ignore command causing timeout */
7772 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7773 scsi_medium_access_command(scp))
7774 return true; /* time out reads and writes */
7779 /* Response to TUR or media access command when device stopped */
7780 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7784 ktime_t now_ts = ktime_get_boottime();
7785 struct scsi_device *sdp = scp->device;
7787 stopped_state = atomic_read(&devip->stopped);
7788 if (stopped_state == 2) {
7789 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7790 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7791 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7792 /* tur_ms_to_ready timer expired */
7793 atomic_set(&devip->stopped, 0);
7797 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7799 sdev_printk(KERN_INFO, sdp,
7800 "%s: Not ready: in process of becoming ready\n", my_name);
7801 if (scp->cmnd[0] == TEST_UNIT_READY) {
7802 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7804 if (diff_ns <= tur_nanosecs_to_ready)
7805 diff_ns = tur_nanosecs_to_ready - diff_ns;
7807 diff_ns = tur_nanosecs_to_ready;
7808 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7809 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7810 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7812 return check_condition_result;
7815 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7817 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7819 return check_condition_result;
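/* Setup sketch (delay value illustrative): after
 *     modprobe scsi_debug tur_ms_to_ready=2500
 * TEST UNIT READY gets the "becoming ready" sense above, with the
 * remaining wait in milliseconds placed in the sense information field,
 * for the first 2.5 seconds after device creation. */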
7822 static void sdebug_map_queues(struct Scsi_Host *shost)
7826 if (shost->nr_hw_queues == 1)
7829 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7830 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7834 if (i == HCTX_TYPE_DEFAULT)
7835 map->nr_queues = submit_queues - poll_queues;
7836 else if (i == HCTX_TYPE_POLL)
7837 map->nr_queues = poll_queues;
7839 if (!map->nr_queues) {
7840 BUG_ON(i == HCTX_TYPE_DEFAULT);
7844 map->queue_offset = qoff;
7845 blk_mq_map_queues(map);
7847 qoff += map->nr_queues;
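/* Worked example: with submit_queues=4 and poll_queues=1, the loop above
 * assigns HCTX_TYPE_DEFAULT 3 queues at queue_offset 0 and HCTX_TYPE_POLL
 * 1 queue at queue_offset 3; HCTX_TYPE_READ keeps nr_queues == 0 and is
 * skipped. */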
7851 struct sdebug_blk_mq_poll_data {
7852 unsigned int queue_num;
7857 * We don't handle aborted commands here, but it does not seem possible to have
7858 * aborted polled commands from schedule_resp()
7860 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7862 struct sdebug_blk_mq_poll_data *data = opaque;
7863 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7864 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7865 struct sdebug_defer *sd_dp;
7866 u32 unique_tag = blk_mq_unique_tag(rq);
7867 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7868 struct sdebug_queued_cmd *sqcp;
7869 unsigned long flags;
7870 int queue_num = data->queue_num;
7873 /* We're only interested in one queue for this iteration */
7874 if (hwq != queue_num)
7877 /* Subsequent checks would fail if this failed, but check anyway */
7878 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7881 time = ktime_get_boottime();
7883 spin_lock_irqsave(&sdsc->lock, flags);
7884 sqcp = TO_QUEUED_CMD(cmd);
7886 spin_unlock_irqrestore(&sdsc->lock, flags);
7890 sd_dp = &sqcp->sd_dp;
7891 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7892 spin_unlock_irqrestore(&sdsc->lock, flags);
7896 if (time < sd_dp->cmpl_ts) {
7897 spin_unlock_irqrestore(&sdsc->lock, flags);
7901 ASSIGN_QUEUED_CMD(cmd, NULL);
7902 spin_unlock_irqrestore(&sdsc->lock, flags);
7904 if (sdebug_statistics) {
7905 atomic_inc(&sdebug_completions);
7906 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7907 atomic_inc(&sdebug_miss_cpus);
7910 sdebug_free_queued_cmd(sqcp);
7912 scsi_done(cmd); /* callback to mid level */
7913 (*data->num_entries)++;
7917 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7919 int num_entries = 0;
7920 struct sdebug_blk_mq_poll_data data = {
7921 .queue_num = queue_num,
7922 .num_entries = &num_entries,
7925 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7928 if (num_entries > 0)
7929 atomic_add(num_entries, &sdeb_mq_poll_count);
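/* Minimal sketch for exercising this poll path (values illustrative):
 * load with "modprobe scsi_debug submit_queues=4 poll_queues=2", then
 * issue I/O through an io_uring ring created with IORING_SETUP_IOPOLL;
 * completions are then reaped by this .mq_poll callback instead of the
 * driver's hrtimer/workqueue completion path. */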
7933 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
7935 struct scsi_device *sdp = cmnd->device;
7936 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7937 struct sdebug_err_inject *err;
7938 unsigned char *cmd = cmnd->cmnd;
7945 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7946 if (err->type == ERR_TMOUT_CMD &&
7947 (err->cmd == cmd[0] || err->cmd == 0xff)) {
7961 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
7963 struct scsi_device *sdp = cmnd->device;
7964 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7965 struct sdebug_err_inject *err;
7966 unsigned char *cmd = cmnd->cmnd;
7973 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
7974 if (err->type == ERR_FAIL_QUEUE_CMD &&
7975 (err->cmd == cmd[0] || err->cmd == 0xff)) {
7976 ret = err->cnt ? err->queuecmd_ret : 0;
7989 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
7990 struct sdebug_err_inject *info)
7992 struct scsi_device *sdp = cmnd->device;
7993 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
7994 struct sdebug_err_inject *err;
7995 unsigned char *cmd = cmnd->cmnd;
8003 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8004 if (err->type == ERR_FAIL_CMD &&
8005 (err->cmd == cmd[0] || err->cmd == 0xff)) {
8023 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8024 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8026 *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8031 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8032 struct scsi_cmnd *scp)
8035 struct scsi_device *sdp = scp->device;
8036 const struct opcode_info_t *oip;
8037 const struct opcode_info_t *r_oip;
8038 struct sdebug_dev_info *devip;
8039 u8 *cmd = scp->cmnd;
8040 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8041 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8044 u64 lun_index = sdp->lun & 0x3FFF;
8051 struct sdebug_err_inject err;
8053 scsi_set_resid(scp, 0);
8054 if (sdebug_statistics) {
8055 atomic_inc(&sdebug_cmnd_count);
8056 inject_now = inject_on_this_cmd();
8060 if (unlikely(sdebug_verbose &&
8061 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8066 sb = (int)sizeof(b);
8068 strcpy(b, "too long, over 32 bytes");
8070 for (k = 0, n = 0; k < len && n < sb; ++k)
8071 n += scnprintf(b + n, sb - n, "%02x ",
8074 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8075 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8077 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8078 return SCSI_MLQUEUE_HOST_BUSY;
8079 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8080 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8083 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
8084 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
8085 devip = (struct sdebug_dev_info *)sdp->hostdata;
8086 if (unlikely(!devip)) {
8087 devip = find_build_dev_info(sdp);
8092 if (sdebug_timeout_cmd(scp)) {
8093 scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8097 ret = sdebug_fail_queue_cmd(scp);
8099 scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8104 if (sdebug_fail_cmd(scp, &ret, &err)) {
8105 scmd_printk(KERN_INFO, scp,
8106 "fail command 0x%x with hostbyte=0x%x, "
8107 "driverbyte=0x%x, statusbyte=0x%x, "
8108 "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8109 opcode, err.host_byte, err.driver_byte,
8110 err.status_byte, err.sense_key, err.asc, err.asq);
8114 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8115 atomic_set(&sdeb_inject_pending, 1);
8117 na = oip->num_attached;
8119 if (na) { /* multiple commands with this opcode */
8121 if (FF_SA & r_oip->flags) {
8122 if (F_SA_LOW & oip->flags)
8125 sa = get_unaligned_be16(cmd + 8);
8126 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8127 if (opcode == oip->opcode && sa == oip->sa)
8130 } else { /* since no service action only check opcode */
8131 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8132 if (opcode == oip->opcode)
8137 if (F_SA_LOW & r_oip->flags)
8138 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8139 else if (F_SA_HIGH & r_oip->flags)
8140 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8142 mk_sense_invalid_opcode(scp);
8145 } /* else (when na==0) we assume the oip is a match */
8147 if (unlikely(F_INV_OP & flags)) {
8148 mk_sense_invalid_opcode(scp);
8151 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8153 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8154 my_name, opcode, " supported for wlun");
8155 mk_sense_invalid_opcode(scp);
8158 if (unlikely(sdebug_strict)) { /* check cdb against mask */
8162 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8163 rem = ~oip->len_mask[k] & cmd[k];
8165 for (j = 7; j >= 0; --j, rem <<= 1) {
8169 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8174 if (unlikely(!(F_SKIP_UA & flags) &&
8175 find_first_bit(devip->uas_bm,
8176 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8177 errsts = make_ua(scp, devip);
8181 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8182 atomic_read(&devip->stopped))) {
8183 errsts = resp_not_ready(scp, devip);
8187 if (sdebug_fake_rw && (F_FAKE_RW & flags))
8189 if (unlikely(sdebug_every_nth)) {
8190 if (fake_timeout(scp))
8191 return 0; /* ignore command: make trouble */
8193 if (likely(oip->pfp))
8194 pfp = oip->pfp; /* calls a resp_* function */
8196 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
8199 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
8200 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8201 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8202 sdebug_ndelay > 10000)) {
8204 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8205 * for Start Stop Unit (SSU) want at least 1 second delay and
8206 * if sdebug_jdelay>1 want a long delay of that many seconds.
8207 * For Synchronize Cache want 1/20 of SSU's delay.
8209 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8210 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8212 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
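/* Worked example (assuming HZ=250): sdebug_jdelay=3 with a
 * SYNCHRONIZE CACHE (denom=20) yields 3 * 250 / 20 ~= 37 jiffies,
 * roughly 150 ms; an SSU (denom=1) would get the full 750 jiffies. */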
8213 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8215 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8218 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8220 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8223 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8225 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8227 spin_lock_init(&sdsc->lock);
8232 static struct scsi_host_template sdebug_driver_template = {
8233 .show_info = scsi_debug_show_info,
8234 .write_info = scsi_debug_write_info,
8235 .proc_name = sdebug_proc_name,
8236 .name = "SCSI DEBUG",
8237 .info = scsi_debug_info,
8238 .slave_alloc = scsi_debug_slave_alloc,
8239 .slave_configure = scsi_debug_slave_configure,
8240 .slave_destroy = scsi_debug_slave_destroy,
8241 .ioctl = scsi_debug_ioctl,
8242 .queuecommand = scsi_debug_queuecommand,
8243 .change_queue_depth = sdebug_change_qdepth,
8244 .map_queues = sdebug_map_queues,
8245 .mq_poll = sdebug_blk_mq_poll,
8246 .eh_abort_handler = scsi_debug_abort,
8247 .eh_device_reset_handler = scsi_debug_device_reset,
8248 .eh_target_reset_handler = scsi_debug_target_reset,
8249 .eh_bus_reset_handler = scsi_debug_bus_reset,
8250 .eh_host_reset_handler = scsi_debug_host_reset,
8251 .can_queue = SDEBUG_CANQUEUE,
8253 .sg_tablesize = SG_MAX_SEGMENTS,
8254 .cmd_per_lun = DEF_CMD_PER_LUN,
8256 .max_segment_size = -1U,
8257 .module = THIS_MODULE,
8258 .track_queue_depth = 1,
8259 .cmd_size = sizeof(struct sdebug_scsi_cmd),
8260 .init_cmd_priv = sdebug_init_cmd_priv,
8261 .target_alloc = sdebug_target_alloc,
8262 .target_destroy = sdebug_target_destroy,
8265 static int sdebug_driver_probe(struct device *dev)
8268 struct sdebug_host_info *sdbg_host;
8269 struct Scsi_Host *hpnt;
8272 sdbg_host = dev_to_sdebug_host(dev);
8274 sdebug_driver_template.can_queue = sdebug_max_queue;
8275 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8276 if (!sdebug_clustering)
8277 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8279 hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8281 pr_err("scsi_host_alloc failed\n");
8285 if (submit_queues > nr_cpu_ids) {
8286 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8287 my_name, submit_queues, nr_cpu_ids);
8288 submit_queues = nr_cpu_ids;
8291 * Decide whether to tell scsi subsystem that we want mq. The
8292 * following should give the same answer for each host.
8294 hpnt->nr_hw_queues = submit_queues;
8295 if (sdebug_host_max_queue)
8296 hpnt->host_tagset = 1;
8298 /* poll queues are possible for nr_hw_queues > 1 */
8299 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8300 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8301 my_name, poll_queues, hpnt->nr_hw_queues);
8306 * Poll queues don't need interrupts, but we need at least one I/O queue
8307 * left over for non-polled I/O.
8308 * If condition not met, trim poll_queues to 1 (just for simplicity).
8310 if (poll_queues >= submit_queues) {
8311 if (submit_queues < 3)
8312 pr_warn("%s: trim poll_queues to 1\n", my_name);
8314 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8315 my_name, submit_queues - 1);
8321 sdbg_host->shost = hpnt;
8322 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8323 hpnt->max_id = sdebug_num_tgts + 1;
8325 hpnt->max_id = sdebug_num_tgts;
8326 /* = sdebug_max_luns; */
8327 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8331 switch (sdebug_dif) {
8333 case T10_PI_TYPE1_PROTECTION:
8334 hprot = SHOST_DIF_TYPE1_PROTECTION;
8336 hprot |= SHOST_DIX_TYPE1_PROTECTION;
8339 case T10_PI_TYPE2_PROTECTION:
8340 hprot = SHOST_DIF_TYPE2_PROTECTION;
8342 hprot |= SHOST_DIX_TYPE2_PROTECTION;
8345 case T10_PI_TYPE3_PROTECTION:
8346 hprot = SHOST_DIF_TYPE3_PROTECTION;
8348 hprot |= SHOST_DIX_TYPE3_PROTECTION;
8353 hprot |= SHOST_DIX_TYPE0_PROTECTION;
8357 scsi_host_set_prot(hpnt, hprot);
8359 if (have_dif_prot || sdebug_dix)
8360 pr_info("host protection%s%s%s%s%s%s%s\n",
8361 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8362 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8363 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8364 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8365 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8366 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8367 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8369 if (sdebug_guard == 1)
8370 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8372 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8374 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8375 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8376 if (sdebug_every_nth) /* need stats counters for every_nth */
8377 sdebug_statistics = true;
8378 error = scsi_add_host(hpnt, &sdbg_host->dev);
8380 pr_err("scsi_add_host failed\n");
8382 scsi_host_put(hpnt);
8384 scsi_scan_host(hpnt);
8390 static void sdebug_driver_remove(struct device *dev)
8392 struct sdebug_host_info *sdbg_host;
8393 struct sdebug_dev_info *sdbg_devinfo, *tmp;
8395 sdbg_host = dev_to_sdebug_host(dev);
8397 scsi_remove_host(sdbg_host->shost);
8399 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8401 list_del(&sdbg_devinfo->dev_list);
8402 kfree(sdbg_devinfo->zstate);
8403 kfree(sdbg_devinfo);
8406 scsi_host_put(sdbg_host->shost);
8409 static struct bus_type pseudo_lld_bus = {
8411 .probe = sdebug_driver_probe,
8412 .remove = sdebug_driver_remove,
8413 .drv_groups = sdebug_drv_groups,