// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
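/*
 * Worked example (illustrative) of combining the option bits above:
 * SDEBUG_OPT_NOISE | SDEBUG_OPT_MEDIUM_ERR == 0x3, so loading the module
 * with "opts=3" (or writing 3 to the driver's 'opts' attribute) enables
 * both command tracing and simulated medium errors.
 */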
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8
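/*
 * Illustrative: the SDEBUG_UA_* values above index the per-device uas_bm
 * bitmap, so queuing a power-on UA looks like:
 *
 *	set_bit(SDEBUG_UA_POR, devip->uas_bm);
 *
 * make_ua() (below) reports the lowest set bit first via find_first_bit(),
 * which is what gives lower numbers higher priority.
 */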
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
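/*
 * Worked example of the arithmetic above: on a 64-bit build
 * (BITS_PER_LONG == 64) SDEBUG_CANQUEUE is 3 * 64 == 192, so at most 192
 * commands can await a deferred response per submit queue, and
 * DEF_CMD_PER_LUN starts queue_depth at that same ceiling.
 */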
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *)qc; }
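/*
 * Illustrative pairing of the two macros above: the submission side
 * stashes the queued command in scsi_cmnd::host_scribble and the
 * completion side fetches it back, e.g.:
 *
 *	ASSIGN_QUEUED_CMD(scmd, sqcp);
 *	...
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 */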
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};
struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)
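/*
 * Illustrative use of the converters above from a host template callback,
 * relying on shost->dma_dev pointing at the embedded 'dev' member:
 *
 *	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(shost);
 */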
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
	spinlock_t lock;
};
static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
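/*
 * Illustrative two-step lookup through the table above: the first cdb
 * byte selects an SDEB_I_* index, which in turn selects an entry in
 * opcode_info_arr[] (defined further down), e.g.:
 *
 *	int ind = opcode_ind_arr[cdb[0]];
 *	const struct opcode_info_t *oip = &opcode_info_arr[ind];
 */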
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
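/*
 * For example (illustrative), a response function that honours an IMMED
 * bit can signal an early completion by OR-ing the mask into its result:
 *
 *	return res | SDEG_RES_IMMED_MASK;
 */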
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;
/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;		/* io_uring iopoll interface */

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

static struct dentry *sdebug_debugfs_root;
static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
	break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
	break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
	break;

	default:
		goto out_error;
	break;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);

	return -EINVAL;
}
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}

static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule(sdebug_target_cleanup_async, targetip);
	}
}
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
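/*
 * For example, adding a small port or device number to one of the bases
 * above yields a complete NAA-3 (locally assigned) identifier, e.g.
 * naa3_comp_a + 1 == 0x3222222000000001ULL.
 */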
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1767 static unsigned char vpdb0_data[] = {
1768 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1769 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1770 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1771 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1774 /* Block limits VPD page (SBC-3) */
1775 static int inquiry_vpd_b0(unsigned char *arr)
1779 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1781 /* Optimal transfer length granularity */
1782 if (sdebug_opt_xferlen_exp != 0 &&
1783 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1784 gran = 1 << sdebug_opt_xferlen_exp;
1786 gran = 1 << sdebug_physblk_exp;
1787 put_unaligned_be16(gran, arr + 2);
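/*
 * Worked example: with sdebug_opt_xferlen_exp=0 the else branch is
 * taken, so sdebug_physblk_exp=3 reports an optimal transfer length
 * granularity of 1 << 3 = 8 logical blocks at bytes 2-3.
 */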
1789 /* Maximum Transfer Length */
1790 if (sdebug_store_sectors > 0x400)
1791 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1793 /* Optimal Transfer Length */
1794 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1797 /* Maximum Unmap LBA Count */
1798 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1800 /* Maximum Unmap Block Descriptor Count */
1801 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1804 /* Unmap Granularity Alignment */
1805 if (sdebug_unmap_alignment) {
1806 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1807 arr[28] |= 0x80; /* UGAVALID */
1810 /* Optimal Unmap Granularity */
1811 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1813 /* Maximum WRITE SAME Length */
1814 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1816 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1819 /* Block device characteristics VPD page (SBC-3) */
1820 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1822 memset(arr, 0, 0x3c);
1824 arr[1] = 1;	/* non-rotating medium (e.g. solid state) */
1826 arr[3] = 5; /* less than 1.8" */
1831 /* Logical block provisioning VPD page (SBC-4) */
1832 static int inquiry_vpd_b2(unsigned char *arr)
1834 memset(arr, 0, 0x4);
1835 arr[0] = 0; /* threshold exponent */
1842 if (sdebug_lbprz && scsi_debug_lbp())
1843 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1844 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1845 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1846 /* threshold_percentage=0 */
1850 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1851 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1853 memset(arr, 0, 0x3c);
1854 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1856 * Set Optimal number of open sequential write preferred zones and
1857 * Optimal number of non-sequentially written sequential write
1858 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1859 * fields set to zero, apart from Max. number of open swrz_s field.
1861 put_unaligned_be32(0xffffffff, &arr[4]);
1862 put_unaligned_be32(0xffffffff, &arr[8]);
1863 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1864 put_unaligned_be32(devip->max_open, &arr[12]);
1866 put_unaligned_be32(0xffffffff, &arr[12]);
1867 if (devip->zcap < devip->zsize) {
1868 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1869 put_unaligned_be64(devip->zsize, &arr[20]);
1876 #define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
1878 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1880 /* Block limits extension VPD page (SBC-4) */
1881 static int inquiry_vpd_b7(unsigned char *arrb4)
1883 memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1884 arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1885 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1886 return SDEBUG_BLE_LEN_AFTER_B4;
1889 #define SDEBUG_LONG_INQ_SZ 96
1890 #define SDEBUG_MAX_INQ_ARR_SZ 584
1892 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1894 unsigned char pq_pdt;
1896 unsigned char *cmd = scp->cmnd;
1899 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1901 alloc_len = get_unaligned_be16(cmd + 3);
1902 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1904 return DID_REQUEUE << 16;
1905 is_disk = (sdebug_ptype == TYPE_DISK);
1906 is_zbc = devip->zoned;
1907 is_disk_zbc = (is_disk || is_zbc);
1908 have_wlun = scsi_is_wlun(scp->device->lun);
1910 pq_pdt = TYPE_WLUN; /* present, wlun */
1911 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1912 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1914 pq_pdt = (sdebug_ptype & 0x1f);
1916 if (0x2 & cmd[1]) { /* CMDDT bit set */
1917 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1919 return check_condition_result;
1920 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1921 int lu_id_num, port_group_id, target_dev_id;
1924 int host_no = devip->sdbg_host->shost->host_no;
1927 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1928 (devip->channel & 0x7f);
1929 if (sdebug_vpd_use_hostno == 0)
1931 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1932 (devip->target * 1000) + devip->lun);
1933 target_dev_id = ((host_no + 1) * 2000) +
1934 (devip->target * 1000) - 3;
1935 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
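/*
 * Worked example (from the formulas above): host_no=0, channel=0,
 * target=0, lun=0 gives port_group_id=0x100, lu_id_num=2000 (so
 * lu_id_str="2000") and target_dev_id=1997.
 */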
1936 if (0 == cmd[2]) { /* supported vital product data pages */
1938 arr[n++] = 0x0; /* this page */
1939 arr[n++] = 0x80; /* unit serial number */
1940 arr[n++] = 0x83; /* device identification */
1941 arr[n++] = 0x84; /* software interface ident. */
1942 arr[n++] = 0x85; /* management network addresses */
1943 arr[n++] = 0x86; /* extended inquiry */
1944 arr[n++] = 0x87; /* mode page policy */
1945 arr[n++] = 0x88; /* SCSI ports */
1946 if (is_disk_zbc) { /* SBC or ZBC */
1947 arr[n++] = 0x89; /* ATA information */
1948 arr[n++] = 0xb0; /* Block limits */
1949 arr[n++] = 0xb1; /* Block characteristics */
1951 arr[n++] = 0xb2; /* LB Provisioning */
1953 arr[n++] = 0xb6; /* ZB dev. char. */
1954 arr[n++] = 0xb7; /* Block limits extension */
1956 arr[3] = n - 4; /* number of supported VPD pages */
1957 } else if (0x80 == cmd[2]) { /* unit serial number */
1959 memcpy(&arr[4], lu_id_str, len);
1960 } else if (0x83 == cmd[2]) { /* device identification */
1961 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1962 target_dev_id, lu_id_num,
1965 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1966 arr[3] = inquiry_vpd_84(&arr[4]);
1967 } else if (0x85 == cmd[2]) { /* Management network addresses */
1968 arr[3] = inquiry_vpd_85(&arr[4]);
1969 } else if (0x86 == cmd[2]) { /* extended inquiry */
1970 arr[3] = 0x3c; /* number of following entries */
1971 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1972 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1973 else if (have_dif_prot)
1974 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1976 arr[4] = 0x0; /* no protection stuff */
1978 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
1979 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
1982 } else if (0x87 == cmd[2]) { /* mode page policy */
1983 arr[3] = 0x8; /* number of following entries */
1984 arr[4] = 0x2; /* disconnect-reconnect mp */
1985 arr[6] = 0x80; /* mlus, shared */
1986 arr[8] = 0x18; /* protocol specific lu */
1987 arr[10] = 0x82; /* mlus, per initiator port */
1988 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1989 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1990 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1991 n = inquiry_vpd_89(&arr[4]);
1992 put_unaligned_be16(n, arr + 2);
1993 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1994 arr[3] = inquiry_vpd_b0(&arr[4]);
1995 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1996 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1997 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1998 arr[3] = inquiry_vpd_b2(&arr[4]);
1999 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2000 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2001 } else if (cmd[2] == 0xb7) { /* block limits extension page */
2002 arr[3] = inquiry_vpd_b7(&arr[4]);
2004 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2006 return check_condition_result;
2008 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2009 ret = fill_from_dev_buffer(scp, arr,
2010 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2014 /* drops through here for a standard inquiry */
2015 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2016 arr[2] = sdebug_scsi_level;
2017 arr[3] = 2; /* response_data_format==2 */
2018 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2019 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2020 if (sdebug_vpd_use_hostno == 0)
2021 arr[5] |= 0x10; /* claim: implicit TPGS */
2022 arr[6] = 0x10; /* claim: MultiP */
2023 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2024 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2025 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2026 memcpy(&arr[16], sdebug_inq_product_id, 16);
2027 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2028 /* Use Vendor Specific area to place driver date in ASCII */
2029 memcpy(&arr[36], sdebug_version_date, 8);
2030 /* version descriptors (2 bytes each) follow */
2031 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2032 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2034 if (is_disk) { /* SBC-4 no version claimed */
2035 put_unaligned_be16(0x600, arr + n);
2037 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
2038 put_unaligned_be16(0x525, arr + n);
2040 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2041 put_unaligned_be16(0x624, arr + n);
2044 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2045 ret = fill_from_dev_buffer(scp, arr,
2046 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2051 /* See resp_iec_m_pg() for how this data is manipulated */
2052 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2055 static int resp_requests(struct scsi_cmnd *scp,
2056 struct sdebug_dev_info *devip)
2058 unsigned char *cmd = scp->cmnd;
2059 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2060 bool dsense = !!(cmd[1] & 1);
2061 u32 alloc_len = cmd[4];
2063 int stopped_state = atomic_read(&devip->stopped);
2065 memset(arr, 0, sizeof(arr));
2066 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2070 arr[2] = LOGICAL_UNIT_NOT_READY;
2071 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2075 arr[2] = NOT_READY;	/* NOT_READY in sense_key */
2076 arr[7] = 0xa; /* 18 byte sense buffer */
2077 arr[12] = LOGICAL_UNIT_NOT_READY;
2078 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
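/*
 * Note: the dsense branch above builds descriptor-format sense
 * (ASC/ASCQ at bytes 2-3) while this branch builds 18-byte
 * fixed-format sense: sense key at byte 2, additional length 0xa
 * at byte 7, ASC/ASCQ at bytes 12-13.
 */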
2080 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2081 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2084 arr[1] = 0x0; /* NO_SENSE in sense_key */
2085 arr[2] = THRESHOLD_EXCEEDED;
2086 arr[3] = 0xff; /* Failure prediction(false) */
2090 arr[2] = 0x0; /* NO_SENSE in sense_key */
2091 arr[7] = 0xa; /* 18 byte sense buffer */
2092 arr[12] = THRESHOLD_EXCEEDED;
2093 arr[13] = 0xff; /* Failure prediction(false) */
2095 } else { /* nothing to report */
2098 memset(arr, 0, len);
2101 memset(arr, 0, len);
2106 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2109 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2111 unsigned char *cmd = scp->cmnd;
2112 int power_cond, want_stop, stopped_state;
2115 power_cond = (cmd[4] & 0xf0) >> 4;
2117 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2118 return check_condition_result;
2120 want_stop = !(cmd[4] & 1);
2121 stopped_state = atomic_read(&devip->stopped);
2122 if (stopped_state == 2) {
2123 ktime_t now_ts = ktime_get_boottime();
2125 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2126 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2128 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2129 /* tur_ms_to_ready timer has expired */
2130 atomic_set(&devip->stopped, 0);
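/*
 * Worked example: sdeb_tur_ms_to_ready is in milliseconds, so a
 * setting of 2000 makes the unit ready once diff_ns reaches
 * 2000 * 1000000 = 2e9 ns after device creation.
 */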
2134 if (stopped_state == 2) {
2136 stopped_state = 1; /* dummy up success */
2137 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2138 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2139 return check_condition_result;
2143 changing = (stopped_state != want_stop);
2145 atomic_xchg(&devip->stopped, want_stop);
2146 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2147 return SDEG_RES_IMMED_MASK;
2152 static sector_t get_sdebug_capacity(void)
2154 static const unsigned int gibibyte = 1073741824;
2156 if (sdebug_virtual_gb > 0)
2157 return (sector_t)sdebug_virtual_gb *
2158 (gibibyte / sdebug_sector_size);
2160 return sdebug_store_sectors;
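/*
 * Worked example: virtual_gb=4 with the default 512-byte sector size
 * reports 4 * (1073741824 / 512) = 8388608 sectors (4 GiB), even if
 * the backing store is smaller.
 */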
2163 #define SDEBUG_READCAP_ARR_SZ 8
2164 static int resp_readcap(struct scsi_cmnd *scp,
2165 struct sdebug_dev_info *devip)
2167 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2170 /* following just in case virtual_gb changed */
2171 sdebug_capacity = get_sdebug_capacity();
2172 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2173 if (sdebug_capacity < 0xffffffff) {
2174 capac = (unsigned int)sdebug_capacity - 1;
2175 put_unaligned_be32(capac, arr + 0);
2177 put_unaligned_be32(0xffffffff, arr + 0);
2178 put_unaligned_be16(sdebug_sector_size, arr + 6);
2179 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2182 #define SDEBUG_READCAP16_ARR_SZ 32
2183 static int resp_readcap16(struct scsi_cmnd *scp,
2184 struct sdebug_dev_info *devip)
2186 unsigned char *cmd = scp->cmnd;
2187 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2190 alloc_len = get_unaligned_be32(cmd + 10);
2191 /* following just in case virtual_gb changed */
2192 sdebug_capacity = get_sdebug_capacity();
2193 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2194 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2195 put_unaligned_be32(sdebug_sector_size, arr + 8);
2196 arr[13] = sdebug_physblk_exp & 0xf;
2197 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2199 if (scsi_debug_lbp()) {
2200 arr[14] |= 0x80; /* LBPME */
2201 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2202 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2203 * in the wider field maps to 0 in this field.
2205 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2210 * Since the scsi_debug READ CAPACITY implementation always reports the
2211 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2216 arr[15] = sdebug_lowest_aligned & 0xff;
2218 if (have_dif_prot) {
2219 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2220 arr[12] |= 1; /* PROT_EN */
2223 return fill_from_dev_buffer(scp, arr,
2224 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2227 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2229 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2230 struct sdebug_dev_info *devip)
2232 unsigned char *cmd = scp->cmnd;
2234 int host_no = devip->sdbg_host->shost->host_no;
2235 int port_group_a, port_group_b, port_a, port_b;
2239 alen = get_unaligned_be32(cmd + 6);
2240 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2242 return DID_REQUEUE << 16;
2244 * EVPD page 0x88 states we have two ports, one
2245 * real and a fake port with no device connected.
2246 * So we create two port groups with one port each
2247 * and set the group with port B to unavailable.
2249 port_a = 0x1; /* relative port A */
2250 port_b = 0x2; /* relative port B */
2251 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2252 (devip->channel & 0x7f);
2253 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2254 (devip->channel & 0x7f) + 0x80;
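/*
 * Worked example: host_no=0 on channel 0 yields port_group_a=0x100
 * and port_group_b=0x180; bit 7 of the low byte marks the group
 * holding the fake, unavailable port.
 */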
2257 * The asymmetric access state is cycled according to the host_id.
2260 if (sdebug_vpd_use_hostno == 0) {
2261 arr[n++] = host_no % 3; /* Asymm access state */
2262 arr[n++] = 0x0F; /* claim: all states are supported */
2264 arr[n++] = 0x0; /* Active/Optimized path */
2265 arr[n++] = 0x01; /* only support active/optimized paths */
2267 put_unaligned_be16(port_group_a, arr + n);
2269 arr[n++] = 0; /* Reserved */
2270 arr[n++] = 0; /* Status code */
2271 arr[n++] = 0; /* Vendor unique */
2272 arr[n++] = 0x1; /* One port per group */
2273 arr[n++] = 0; /* Reserved */
2274 arr[n++] = 0; /* Reserved */
2275 put_unaligned_be16(port_a, arr + n);
2277 arr[n++] = 3; /* Port unavailable */
2278 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2279 put_unaligned_be16(port_group_b, arr + n);
2281 arr[n++] = 0; /* Reserved */
2282 arr[n++] = 0; /* Status code */
2283 arr[n++] = 0; /* Vendor unique */
2284 arr[n++] = 0x1; /* One port per group */
2285 arr[n++] = 0; /* Reserved */
2286 arr[n++] = 0; /* Reserved */
2287 put_unaligned_be16(port_b, arr + n);
2291 put_unaligned_be32(rlen, arr + 0);
2294 * Return the smallest of:
2295 * - the allocated length (alen)
2296 * - the constructed response length (n)
2297 * - the maximum array size
2299 rlen = min(alen, n);
2300 ret = fill_from_dev_buffer(scp, arr,
2301 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2306 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2307 struct sdebug_dev_info *devip)
2310 u8 reporting_opts, req_opcode, sdeb_i, supp;
2312 u32 alloc_len, a_len;
2313 int k, offset, len, errsts, count, bump, na;
2314 const struct opcode_info_t *oip;
2315 const struct opcode_info_t *r_oip;
2317 u8 *cmd = scp->cmnd;
2319 rctd = !!(cmd[2] & 0x80);
2320 reporting_opts = cmd[2] & 0x7;
2321 req_opcode = cmd[3];
2322 req_sa = get_unaligned_be16(cmd + 4);
2323 alloc_len = get_unaligned_be32(cmd + 6);
2324 if (alloc_len < 4 || alloc_len > 0xffff) {
2325 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2326 return check_condition_result;
2328 if (alloc_len > 8192)
2332 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2334 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2336 return check_condition_result;
2338 switch (reporting_opts) {
2339 case 0: /* all commands */
2340 /* count number of commands */
2341 for (count = 0, oip = opcode_info_arr;
2342 oip->num_attached != 0xff; ++oip) {
2343 if (F_INV_OP & oip->flags)
2345 count += (oip->num_attached + 1);
2347 bump = rctd ? 20 : 8;
2348 put_unaligned_be32(count * bump, arr);
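/*
 * For reference (per SPC): each command descriptor is 8 bytes; when
 * RCTD is set a 12-byte command timeouts descriptor follows each
 * one, hence bump = 20.
 */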
2349 for (offset = 4, oip = opcode_info_arr;
2350 oip->num_attached != 0xff && offset < a_len; ++oip) {
2351 if (F_INV_OP & oip->flags)
2353 na = oip->num_attached;
2354 arr[offset] = oip->opcode;
2355 put_unaligned_be16(oip->sa, arr + offset + 2);
2357 arr[offset + 5] |= 0x2;
2358 if (FF_SA & oip->flags)
2359 arr[offset + 5] |= 0x1;
2360 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2362 put_unaligned_be16(0xa, arr + offset + 8);
2364 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2365 if (F_INV_OP & oip->flags)
2368 arr[offset] = oip->opcode;
2369 put_unaligned_be16(oip->sa, arr + offset + 2);
2371 arr[offset + 5] |= 0x2;
2372 if (FF_SA & oip->flags)
2373 arr[offset + 5] |= 0x1;
2374 put_unaligned_be16(oip->len_mask[0],
2377 put_unaligned_be16(0xa,
2384 case 1: /* one command: opcode only */
2385 case 2: /* one command: opcode plus service action */
2386 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2387 sdeb_i = opcode_ind_arr[req_opcode];
2388 oip = &opcode_info_arr[sdeb_i];
2389 if (F_INV_OP & oip->flags) {
2393 if (1 == reporting_opts) {
2394 if (FF_SA & oip->flags) {
2395 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2398 return check_condition_result;
2401 } else if (2 == reporting_opts &&
2402 0 == (FF_SA & oip->flags)) {
2403 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2404 kfree(arr);	/* sense field pointer (cdb byte 4) indicates the requested sa */
2405 return check_condition_result;
2407 if (0 == (FF_SA & oip->flags) &&
2408 req_opcode == oip->opcode)
2410 else if (0 == (FF_SA & oip->flags)) {
2411 na = oip->num_attached;
2412 for (k = 0, oip = oip->arrp; k < na;
2414 if (req_opcode == oip->opcode)
2417 supp = (k >= na) ? 1 : 3;
2418 } else if (req_sa != oip->sa) {
2419 na = oip->num_attached;
2420 for (k = 0, oip = oip->arrp; k < na;
2422 if (req_sa == oip->sa)
2425 supp = (k >= na) ? 1 : 3;
2429 u = oip->len_mask[0];
2430 put_unaligned_be16(u, arr + 2);
2431 arr[4] = oip->opcode;
2432 for (k = 1; k < u; ++k)
2433 arr[4 + k] = (k < 16) ?
2434 oip->len_mask[k] : 0xff;
2439 arr[1] = (rctd ? 0x80 : 0) | supp;
2441 put_unaligned_be16(0xa, arr + offset);
2446 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2448 return check_condition_result;
2450 offset = (offset < a_len) ? offset : a_len;
2451 len = (offset < alloc_len) ? offset : alloc_len;
2452 errsts = fill_from_dev_buffer(scp, arr, len);
2457 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2458 struct sdebug_dev_info *devip)
2463 u8 *cmd = scp->cmnd;
2465 memset(arr, 0, sizeof(arr));
2466 repd = !!(cmd[2] & 0x80);
2467 alloc_len = get_unaligned_be32(cmd + 6);
2468 if (alloc_len < 4) {
2469 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2470 return check_condition_result;
2472 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2473 arr[1] = 0x1; /* ITNRS */
2480 len = (len < alloc_len) ? len : alloc_len;
2481 return fill_from_dev_buffer(scp, arr, len);
2484 /* <<Following mode page info copied from ST318451LW>> */
2486 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2487 { /* Read-Write Error Recovery page for mode_sense */
2488 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2491 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2493 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2494 return sizeof(err_recov_pg);
2497 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2498 { /* Disconnect-Reconnect page for mode_sense */
2499 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2500 0, 0, 0, 0, 0, 0, 0, 0};
2502 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2504 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2505 return sizeof(disconnect_pg);
2508 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2509 { /* Format device page for mode_sense */
2510 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2511 0, 0, 0, 0, 0, 0, 0, 0,
2512 0, 0, 0, 0, 0x40, 0, 0, 0};
2514 memcpy(p, format_pg, sizeof(format_pg));
2515 put_unaligned_be16(sdebug_sectors_per, p + 10);
2516 put_unaligned_be16(sdebug_sector_size, p + 12);
2517 if (sdebug_removable)
2518 p[20] |= 0x20; /* should agree with INQUIRY */
2520 memset(p + 2, 0, sizeof(format_pg) - 2);
2521 return sizeof(format_pg);
2524 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2525 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2528 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2529 { /* Caching page for mode_sense */
2530 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2531 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2532 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2533 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2535 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2536 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2537 memcpy(p, caching_pg, sizeof(caching_pg));
2539 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2540 else if (2 == pcontrol)
2541 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2542 return sizeof(caching_pg);
2545 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2548 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2549 { /* Control mode page for mode_sense */
2550 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2552 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2556 ctrl_m_pg[2] |= 0x4;
2558 ctrl_m_pg[2] &= ~0x4;
2561 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2563 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2565 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2566 else if (2 == pcontrol)
2567 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2568 return sizeof(ctrl_m_pg);
2571 /* IO Advice Hints Grouping mode page */
2572 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2574 /* IO Advice Hints Grouping mode page */
2575 struct grouping_m_pg {
2576 u8 page_code; /* OR 0x40 when subpage_code > 0 */
2580 struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2582 static const struct grouping_m_pg gr_m_pg = {
2583 .page_code = 0xa | 0x40,
2585 .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2596 BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2597 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2598 memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2599 if (1 == pcontrol) {
2600 /* There are no changeable values so clear from byte 4 on. */
2601 memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2603 return sizeof(gr_m_pg);
2606 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2607 { /* Informational Exceptions control mode page for mode_sense */
2608 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2610 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2613 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2615 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2616 else if (2 == pcontrol)
2617 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2618 return sizeof(iec_m_pg);
2621 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2622 { /* SAS SSP mode page - short format for mode_sense */
2623 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2624 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2626 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2628 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2629 return sizeof(sas_sf_m_pg);
2633 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2635 { /* SAS phy control and discover mode page for mode_sense */
2636 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2637 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2638 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2639 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2640 0x2, 0, 0, 0, 0, 0, 0, 0,
2641 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2642 0, 0, 0, 0, 0, 0, 0, 0,
2643 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2644 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2645 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2646 0x3, 0, 0, 0, 0, 0, 0, 0,
2647 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2648 0, 0, 0, 0, 0, 0, 0, 0,
2652 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2653 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2654 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2655 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2656 port_a = target_dev_id + 1;
2657 port_b = port_a + 1;
2658 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2659 put_unaligned_be32(port_a, p + 20);
2660 put_unaligned_be32(port_b, p + 48 + 20);
2662 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2663 return sizeof(sas_pcd_m_pg);
2666 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2667 { /* SAS SSP shared protocol specific port mode subpage */
2668 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2669 0, 0, 0, 0, 0, 0, 0, 0,
2672 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2674 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2675 return sizeof(sas_sha_m_pg);
2678 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2679 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2681 static int resp_mode_sense(struct scsi_cmnd *scp,
2682 struct sdebug_dev_info *devip)
2684 int pcontrol, pcode, subpcode, bd_len;
2685 unsigned char dev_spec;
2686 u32 alloc_len, offset, len;
2688 int target = scp->device->id;
2690 unsigned char *arr __free(kfree);
2691 unsigned char *cmd = scp->cmnd;
2692 bool dbd, llbaa, msense_6, is_disk, is_zbc;
2694 arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2697 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2698 pcontrol = (cmd[2] & 0xc0) >> 6;
2699 pcode = cmd[2] & 0x3f;
2701 msense_6 = (MODE_SENSE == cmd[0]);
2702 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2703 is_disk = (sdebug_ptype == TYPE_DISK);
2704 is_zbc = devip->zoned;
2705 if ((is_disk || is_zbc) && !dbd)
2706 bd_len = llbaa ? 16 : 8;
2709 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2710 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2711 if (0x3 == pcontrol) { /* Saving values not supported */
2712 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2713 return check_condition_result;
2715 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2716 (devip->target * 1000) - 3;
2717 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2718 if (is_disk || is_zbc) {
2719 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2731 arr[4] = 0x1; /* set LONGLBA bit */
2732 arr[7] = bd_len; /* assume 255 or less */
2736 if ((bd_len > 0) && (!sdebug_capacity))
2737 sdebug_capacity = get_sdebug_capacity();
2740 if (sdebug_capacity > 0xfffffffe)
2741 put_unaligned_be32(0xffffffff, ap + 0);
2743 put_unaligned_be32(sdebug_capacity, ap + 0);
2744 put_unaligned_be16(sdebug_sector_size, ap + 6);
2747 } else if (16 == bd_len) {
2748 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2749 put_unaligned_be32(sdebug_sector_size, ap + 12);
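/*
 * Note: this is the 16-byte long-LBA block descriptor (MODE
 * SENSE(10) with LLBAA set): 64-bit block count at offset 0 and
 * 32-bit block length at offset 12, versus offsets 0 and 6 in the
 * short 8-byte form above.
 */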
2755 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2756 * len += resp_*_pg(ap + len, pcontrol, target);
2759 case 0x1: /* Read-Write error recovery page, direct access */
2760 if (subpcode > 0x0 && subpcode < 0xff)
2762 len = resp_err_recov_pg(ap, pcontrol, target);
2765 case 0x2: /* Disconnect-Reconnect page, all devices */
2766 if (subpcode > 0x0 && subpcode < 0xff)
2768 len = resp_disconnect_pg(ap, pcontrol, target);
2771 case 0x3: /* Format device page, direct access */
2772 if (subpcode > 0x0 && subpcode < 0xff)
2775 len = resp_format_pg(ap, pcontrol, target);
2781 case 0x8: /* Caching page, direct access */
2782 if (subpcode > 0x0 && subpcode < 0xff)
2784 if (is_disk || is_zbc) {
2785 len = resp_caching_pg(ap, pcontrol, target);
2791 case 0xa: /* Control Mode page, all devices */
2794 len = resp_ctrl_m_pg(ap, pcontrol, target);
2797 len = resp_grouping_m_pg(ap, pcontrol, target);
2800 len = resp_ctrl_m_pg(ap, pcontrol, target);
2801 len += resp_grouping_m_pg(ap + len, pcontrol, target);
2808 case 0x19: /* if spc==1 then sas phy, control+discover */
2809 if (subpcode > 0x2 && subpcode < 0xff)
2812 if ((0x0 == subpcode) || (0xff == subpcode))
2813 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2814 if ((0x1 == subpcode) || (0xff == subpcode))
2815 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2817 if ((0x2 == subpcode) || (0xff == subpcode))
2818 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2821 case 0x1c: /* Informational Exceptions Mode page, all devices */
2822 if (subpcode > 0x0 && subpcode < 0xff)
2824 len = resp_iec_m_pg(ap, pcontrol, target);
2827 case 0x3f: /* Read all Mode pages */
2828 if (subpcode > 0x0 && subpcode < 0xff)
2830 len = resp_err_recov_pg(ap, pcontrol, target);
2831 len += resp_disconnect_pg(ap + len, pcontrol, target);
2833 len += resp_format_pg(ap + len, pcontrol, target);
2834 len += resp_caching_pg(ap + len, pcontrol, target);
2835 } else if (is_zbc) {
2836 len += resp_caching_pg(ap + len, pcontrol, target);
2838 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2839 if (0xff == subpcode)
2840 len += resp_grouping_m_pg(ap + len, pcontrol, target);
2841 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2842 if (0xff == subpcode) {
2843 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2845 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2847 len += resp_iec_m_pg(ap + len, pcontrol, target);
2854 arr[0] = offset - 1;
2856 put_unaligned_be16((offset - 2), arr + 0);
2857 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2860 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2861 return check_condition_result;
2864 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2865 return check_condition_result;
2868 #define SDEBUG_MAX_MSELECT_SZ 512
2870 static int resp_mode_select(struct scsi_cmnd *scp,
2871 struct sdebug_dev_info *devip)
2873 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2874 int param_len, res, mpage;
2875 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2876 unsigned char *cmd = scp->cmnd;
2877 int mselect6 = (MODE_SELECT == cmd[0]);
2879 memset(arr, 0, sizeof(arr));
2882 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2883 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2884 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2885 return check_condition_result;
2887 res = fetch_to_dev_buffer(scp, arr, param_len);
2889 return DID_ERROR << 16;
2890 else if (sdebug_verbose && (res < param_len))
2891 sdev_printk(KERN_INFO, scp->device,
2892 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2893 __func__, param_len, res);
2894 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2895 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2896 off = bd_len + (mselect6 ? 4 : 8);
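/*
 * Worked example: MODE SELECT(6) has a 4-byte parameter header, so
 * with an 8-byte block descriptor the first mode page starts at
 * off = 8 + 4 = 12; MODE SELECT(10) has an 8-byte header, giving
 * off = 8 + 8 = 16.
 */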
2897 if (md_len > 2 || off >= res) {
2898 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2899 return check_condition_result;
2901 mpage = arr[off] & 0x3f;
2902 ps = !!(arr[off] & 0x80);
2904 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2905 return check_condition_result;
2907 spf = !!(arr[off] & 0x40);
2908 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2910 if ((pg_len + off) > param_len) {
2911 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2912 PARAMETER_LIST_LENGTH_ERR, 0);
2913 return check_condition_result;
2916 case 0x8: /* Caching Mode page */
2917 if (caching_pg[1] == arr[off + 1]) {
2918 memcpy(caching_pg + 2, arr + off + 2,
2919 sizeof(caching_pg) - 2);
2920 goto set_mode_changed_ua;
2923 case 0xa: /* Control Mode page */
2924 if (ctrl_m_pg[1] == arr[off + 1]) {
2925 memcpy(ctrl_m_pg + 2, arr + off + 2,
2926 sizeof(ctrl_m_pg) - 2);
2927 if (ctrl_m_pg[4] & 0x8)
2931 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2932 goto set_mode_changed_ua;
2935 case 0x1c: /* Informational Exceptions Mode page */
2936 if (iec_m_pg[1] == arr[off + 1]) {
2937 memcpy(iec_m_pg + 2, arr + off + 2,
2938 sizeof(iec_m_pg) - 2);
2939 goto set_mode_changed_ua;
2945 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2946 return check_condition_result;
2947 set_mode_changed_ua:
2948 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2952 static int resp_temp_l_pg(unsigned char *arr)
2954 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2955 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2958 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2959 return sizeof(temp_l_pg);
2962 static int resp_ie_l_pg(unsigned char *arr)
2964 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2967 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2968 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2969 arr[4] = THRESHOLD_EXCEEDED;
2972 return sizeof(ie_l_pg);
2975 static int resp_env_rep_l_spg(unsigned char *arr)
2977 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2978 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2979 0x1, 0x0, 0x23, 0x8,
2980 0x0, 55, 72, 35, 55, 45, 0, 0,
2983 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2984 return sizeof(env_rep_l_spg);
2987 #define SDEBUG_MAX_LSENSE_SZ 512
2989 static int resp_log_sense(struct scsi_cmnd *scp,
2990 struct sdebug_dev_info *devip)
2992 int ppc, sp, pcode, subpcode;
2993 u32 alloc_len, len, n;
2994 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2995 unsigned char *cmd = scp->cmnd;
2997 memset(arr, 0, sizeof(arr));
3001 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3002 return check_condition_result;
3004 pcode = cmd[2] & 0x3f;
3005 subpcode = cmd[3] & 0xff;
3006 alloc_len = get_unaligned_be16(cmd + 7);
3008 if (0 == subpcode) {
3010 case 0x0: /* Supported log pages log page */
3012 arr[n++] = 0x0; /* this page */
3013 arr[n++] = 0xd; /* Temperature */
3014 arr[n++] = 0x2f; /* Informational exceptions */
3017 case 0xd: /* Temperature log page */
3018 arr[3] = resp_temp_l_pg(arr + 4);
3020 case 0x2f: /* Informational exceptions log page */
3021 arr[3] = resp_ie_l_pg(arr + 4);
3024 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3025 return check_condition_result;
3027 } else if (0xff == subpcode) {
3031 case 0x0: /* Supported log pages and subpages log page */
3034 arr[n++] = 0x0; /* 0,0 page */
3036 arr[n++] = 0xff; /* this page */
3038 arr[n++] = 0x0; /* Temperature */
3040 arr[n++] = 0x1; /* Environment reporting */
3042 arr[n++] = 0xff; /* all 0xd subpages */
3044 arr[n++] = 0x0; /* Informational exceptions */
3046 arr[n++] = 0xff; /* all 0x2f subpages */
3049 case 0xd: /* Temperature subpages */
3052 arr[n++] = 0x0; /* Temperature */
3054 arr[n++] = 0x1; /* Environment reporting */
3056 arr[n++] = 0xff; /* these subpages */
3059 case 0x2f: /* Informational exceptions subpages */
3062 arr[n++] = 0x0; /* Informational exceptions */
3064 arr[n++] = 0xff; /* these subpages */
3068 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3069 return check_condition_result;
3071 } else if (subpcode > 0) {
3074 if (pcode == 0xd && subpcode == 1)
3075 arr[3] = resp_env_rep_l_spg(arr + 4);
3077 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3078 return check_condition_result;
3081 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3082 return check_condition_result;
3084 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3085 return fill_from_dev_buffer(scp, arr,
3086 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
3089 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3091 return devip->nr_zones != 0;
3094 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3095 unsigned long long lba)
3097 u32 zno = lba >> devip->zsize_shift;
3098 struct sdeb_zone_state *zsp;
3100 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3101 return &devip->zstate[zno];
3104 * If the zone capacity is less than the zone size, adjust for gap
3107 zno = 2 * zno - devip->nr_conv_zones;
3108 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3109 zsp = &devip->zstate[zno];
3110 if (lba >= zsp->z_start + zsp->z_size)
3112 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
3116 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3118 return zsp->z_type == ZBC_ZTYPE_CNV;
3121 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3123 return zsp->z_type == ZBC_ZTYPE_GAP;
3126 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3128 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3131 static void zbc_close_zone(struct sdebug_dev_info *devip,
3132 struct sdeb_zone_state *zsp)
3134 enum sdebug_z_cond zc;
3136 if (!zbc_zone_is_seq(zsp))
3140 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3143 if (zc == ZC2_IMPLICIT_OPEN)
3144 devip->nr_imp_open--;
3146 devip->nr_exp_open--;
3148 if (zsp->z_wp == zsp->z_start) {
3149 zsp->z_cond = ZC1_EMPTY;
3151 zsp->z_cond = ZC4_CLOSED;
3156 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3158 struct sdeb_zone_state *zsp = &devip->zstate[0];
3161 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3162 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3163 zbc_close_zone(devip, zsp);
3169 static void zbc_open_zone(struct sdebug_dev_info *devip,
3170 struct sdeb_zone_state *zsp, bool explicit)
3172 enum sdebug_z_cond zc;
3174 if (!zbc_zone_is_seq(zsp))
3178 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3179 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3182 /* Close an implicit open zone if necessary */
3183 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3184 zbc_close_zone(devip, zsp);
3185 else if (devip->max_open &&
3186 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3187 zbc_close_imp_open_zone(devip);
3189 if (zsp->z_cond == ZC4_CLOSED)
3192 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3193 devip->nr_exp_open++;
3195 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3196 devip->nr_imp_open++;
3200 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3201 struct sdeb_zone_state *zsp)
3203 switch (zsp->z_cond) {
3204 case ZC2_IMPLICIT_OPEN:
3205 devip->nr_imp_open--;
3207 case ZC3_EXPLICIT_OPEN:
3208 devip->nr_exp_open--;
3211 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3212 zsp->z_start, zsp->z_cond);
3215 zsp->z_cond = ZC5_FULL;
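/*
 * Note on zbc_inc_wp() below: for sequential-write-required (SWR)
 * zones the write pointer simply advances by the number of blocks
 * written; the remaining cases handle sequential-write-preferred
 * zones, where writes away from the pointer mark the zone as a
 * non-sequential resource and the zone goes full once the pointer
 * reaches the zone end.
 */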
3218 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3219 unsigned long long lba, unsigned int num)
3221 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3222 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3224 if (!zbc_zone_is_seq(zsp))
3227 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3229 if (zsp->z_wp >= zend)
3230 zbc_set_zone_full(devip, zsp);
3235 if (lba != zsp->z_wp)
3236 zsp->z_non_seq_resource = true;
3242 } else if (end > zsp->z_wp) {
3248 if (zsp->z_wp >= zend)
3249 zbc_set_zone_full(devip, zsp);
3255 zend = zsp->z_start + zsp->z_size;
3260 static int check_zbc_access_params(struct scsi_cmnd *scp,
3261 unsigned long long lba, unsigned int num, bool write)
3263 struct scsi_device *sdp = scp->device;
3264 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3265 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3266 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3269 /* For host-managed, reads cannot cross zone types boundaries */
3270 if (zsp->z_type != zsp_end->z_type) {
3271 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3274 return check_condition_result;
3279 /* Writing into a gap zone is not allowed */
3280 if (zbc_zone_is_gap(zsp)) {
3281 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3282 ATTEMPT_ACCESS_GAP);
3283 return check_condition_result;
3286 /* No restrictions for writes within conventional zones */
3287 if (zbc_zone_is_conv(zsp)) {
3288 if (!zbc_zone_is_conv(zsp_end)) {
3289 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3291 WRITE_BOUNDARY_ASCQ);
3292 return check_condition_result;
3297 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3298 /* Writes cannot cross sequential zone boundaries */
3299 if (zsp_end != zsp) {
3300 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3302 WRITE_BOUNDARY_ASCQ);
3303 return check_condition_result;
3305 /* Cannot write full zones */
3306 if (zsp->z_cond == ZC5_FULL) {
3307 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3308 INVALID_FIELD_IN_CDB, 0);
3309 return check_condition_result;
3311 /* Writes must be aligned to the zone WP */
3312 if (lba != zsp->z_wp) {
3313 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3315 UNALIGNED_WRITE_ASCQ);
3316 return check_condition_result;
3320 /* Handle implicit open of closed and empty zones */
3321 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3322 if (devip->max_open &&
3323 devip->nr_exp_open >= devip->max_open) {
3324 mk_sense_buffer(scp, DATA_PROTECT,
3327 return check_condition_result;
3329 zbc_open_zone(devip, zsp, false);
3335 static inline int check_device_access_params
3336 (struct scsi_cmnd *scp, unsigned long long lba,
3337 unsigned int num, bool write)
3339 struct scsi_device *sdp = scp->device;
3340 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3342 if (lba + num > sdebug_capacity) {
3343 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3344 return check_condition_result;
3346 /* transfer length excessive (tie in to block limits VPD page) */
3347 if (num > sdebug_store_sectors) {
3348 /* needs work to find which cdb byte 'num' comes from */
3349 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3350 return check_condition_result;
3352 if (write && unlikely(sdebug_wp)) {
3353 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3354 return check_condition_result;
3356 if (sdebug_dev_is_zoned(devip))
3357 return check_zbc_access_params(scp, lba, num, write);
3363 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3364 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3365 * that access any of the "stores" in struct sdeb_store_info should call this
3366 * function with bug_if_fake_rw set to true.
3368 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3369 bool bug_if_fake_rw)
3371 if (sdebug_fake_rw) {
3372 BUG_ON(bug_if_fake_rw); /* See note above */
3375 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3378 /* Returns number of bytes copied or -1 if error. */
3379 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3380 u32 sg_skip, u64 lba, u32 num, bool do_write)
3383 u64 block, rest = 0;
3384 enum dma_data_direction dir;
3385 struct scsi_data_buffer *sdb = &scp->sdb;
3389 dir = DMA_TO_DEVICE;
3390 write_since_sync = true;
3392 dir = DMA_FROM_DEVICE;
3395 if (!sdb->length || !sip)
3397 if (scp->sc_data_direction != dir)
3401 block = do_div(lba, sdebug_store_sectors);
3402 if (block + num > sdebug_store_sectors)
3403 rest = block + num - sdebug_store_sectors;
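/*
 * Note: the fake store may be smaller than the reported capacity,
 * so accesses are taken modulo sdebug_store_sectors; "rest" is the
 * tail of a request that wraps past the end of the store and is
 * copied at its beginning.
 */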
3405 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3406 fsp + (block * sdebug_sector_size),
3407 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3408 if (ret != (num - rest) * sdebug_sector_size)
3412 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3413 fsp, rest * sdebug_sector_size,
3414 sg_skip + ((num - rest) * sdebug_sector_size),
3421 /* Returns number of bytes copied or -1 if error. */
3422 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3424 struct scsi_data_buffer *sdb = &scp->sdb;
3428 if (scp->sc_data_direction != DMA_TO_DEVICE)
3430 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3431 num * sdebug_sector_size, 0, true);
3434 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3435 * arr into sip->storep+lba and return true. If comparison fails then
3436 * return false. */
3437 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3438 const u8 *arr, bool compare_only)
3441 u64 block, rest = 0;
3442 u32 store_blks = sdebug_store_sectors;
3443 u32 lb_size = sdebug_sector_size;
3444 u8 *fsp = sip->storep;
3446 block = do_div(lba, store_blks);
3447 if (block + num > store_blks)
3448 rest = block + num - store_blks;
3450 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3454 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3460 arr += num * lb_size;
3461 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3463 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3467 static __be16 dif_compute_csum(const void *buf, int len)
3472 csum = (__force __be16)ip_compute_csum(buf, len);
3474 csum = cpu_to_be16(crc_t10dif(buf, len));
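/*
 * Note (assuming the elided selector tests the driver's guard module
 * parameter): guard=1 picks the IP checksum branch, while the default
 * guard=0 uses the T10-DIF CRC16; either way the result is returned
 * big-endian for comparison with the stored guard tag.
 */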
3479 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3480 sector_t sector, u32 ei_lba)
3482 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3484 if (sdt->guard_tag != csum) {
3485 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3486 (unsigned long)sector,
3487 be16_to_cpu(sdt->guard_tag),
3491 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3492 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3493 pr_err("REF check failed on sector %lu\n",
3494 (unsigned long)sector);
3497 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3498 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3499 pr_err("REF check failed on sector %lu\n",
3500 (unsigned long)sector);
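/*
 * Note: Type 1 protection checks the reference tag against the low
 * 32 bits of the sector number, while Type 2 checks it against
 * ei_lba, the expected initial LBA carried by the 32-byte CDB
 * variants; Type 3 defines no reference tag check.
 */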
3506 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3507 unsigned int sectors, bool read)
3511 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3512 scp->device->hostdata, true);
3513 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3514 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3515 struct sg_mapping_iter miter;
3517 /* Bytes of protection data to copy into sgl */
3518 resid = sectors * sizeof(*dif_storep);
3520 sg_miter_start(&miter, scsi_prot_sglist(scp),
3521 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3522 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3524 while (sg_miter_next(&miter) && resid > 0) {
3525 size_t len = min_t(size_t, miter.length, resid);
3526 void *start = dif_store(sip, sector);
3529 if (dif_store_end < start + len)
3530 rest = start + len - dif_store_end;
3535 memcpy(paddr, start, len - rest);
3537 memcpy(start, paddr, len - rest);
3541 memcpy(paddr + len - rest, dif_storep, rest);
3543 memcpy(dif_storep, paddr + len - rest, rest);
3546 sector += len / sizeof(*dif_storep);
3549 sg_miter_stop(&miter);
3552 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3553 unsigned int sectors, u32 ei_lba)
3558 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3559 scp->device->hostdata, true);
3560 struct t10_pi_tuple *sdt;
3562 for (i = 0; i < sectors; i++, ei_lba++) {
3563 sector = start_sec + i;
3564 sdt = dif_store(sip, sector);
3566 if (sdt->app_tag == cpu_to_be16(0xffff))
3570 * Because scsi_debug acts as both initiator and
3571 * target we proceed to verify the PI even if
3572 * RDPROTECT=3. This is done so the "initiator" knows
3573 * which type of error to return. Otherwise we would
3574 * have to iterate over the PI twice.
3576 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3577 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3586 dif_copy_prot(scp, start_sec, sectors, true);
3593 sdeb_read_lock(struct sdeb_store_info *sip)
3595 if (sdebug_no_rwlock) {
3597 __acquire(&sip->macc_lck);
3599 __acquire(&sdeb_fake_rw_lck);
3602 read_lock(&sip->macc_lck);
3604 read_lock(&sdeb_fake_rw_lck);
3609 sdeb_read_unlock(struct sdeb_store_info *sip)
3611 if (sdebug_no_rwlock) {
3613 __release(&sip->macc_lck);
3615 __release(&sdeb_fake_rw_lck);
3618 read_unlock(&sip->macc_lck);
3620 read_unlock(&sdeb_fake_rw_lck);
3625 sdeb_write_lock(struct sdeb_store_info *sip)
3627 if (sdebug_no_rwlock) {
3629 __acquire(&sip->macc_lck);
3631 __acquire(&sdeb_fake_rw_lck);
3634 write_lock(&sip->macc_lck);
3636 write_lock(&sdeb_fake_rw_lck);
3641 sdeb_write_unlock(struct sdeb_store_info *sip)
3643 if (sdebug_no_rwlock) {
3645 __release(&sip->macc_lck);
3647 __release(&sdeb_fake_rw_lck);
3650 write_unlock(&sip->macc_lck);
3652 write_unlock(&sdeb_fake_rw_lck);
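/*
 * Note: when sdebug_no_rwlock is set these lock helpers take no lock
 * at all; the __acquire()/__release() calls exist only to keep
 * sparse's lock-context tracking balanced.
 */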
3656 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3663 struct sdeb_store_info *sip = devip2sip(devip, true);
3664 u8 *cmd = scp->cmnd;
3669 lba = get_unaligned_be64(cmd + 2);
3670 num = get_unaligned_be32(cmd + 10);
3675 lba = get_unaligned_be32(cmd + 2);
3676 num = get_unaligned_be16(cmd + 7);
3681 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3682 (u32)(cmd[1] & 0x1f) << 16;
3683 num = (0 == cmd[4]) ? 256 : cmd[4];
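/*
 * Worked example: READ(6) packs a 21-bit LBA into bytes 1-3, so
 * cmd[1..3] = 0x01 0x23 0x45 yields lba = 0x12345; a TRANSFER
 * LENGTH byte of 0 means 256 blocks.
 */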
3688 lba = get_unaligned_be32(cmd + 2);
3689 num = get_unaligned_be32(cmd + 6);
3692 case XDWRITEREAD_10:
3694 lba = get_unaligned_be32(cmd + 2);
3695 num = get_unaligned_be16(cmd + 7);
3698 default: /* assume READ(32) */
3699 lba = get_unaligned_be64(cmd + 12);
3700 ei_lba = get_unaligned_be32(cmd + 20);
3701 num = get_unaligned_be32(cmd + 28);
3705 if (unlikely(have_dif_prot && check_prot)) {
3706 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3708 mk_sense_invalid_opcode(scp);
3709 return check_condition_result;
3711 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3712 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3713 (cmd[1] & 0xe0) == 0)
3714 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3717 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3718 atomic_read(&sdeb_inject_pending))) {
3720 atomic_set(&sdeb_inject_pending, 0);
3723 ret = check_device_access_params(scp, lba, num, false);
3726 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3727 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3728 ((lba + num) > sdebug_medium_error_start))) {
3729 /* claim unrecoverable read error */
3730 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3731 /* set info field and valid bit for fixed descriptor */
3732 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3733 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3734 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3735 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3736 put_unaligned_be32(ret, scp->sense_buffer + 3);
3738 scsi_set_resid(scp, scsi_bufflen(scp));
3739 return check_condition_result;
3742 sdeb_read_lock(sip);
3745 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3746 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3747 case 1: /* Guard tag error */
3748 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3749 sdeb_read_unlock(sip);
3750 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3751 return check_condition_result;
3752 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3753 sdeb_read_unlock(sip);
3754 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3755 return illegal_condition_result;
3758 case 3: /* Reference tag error */
3759 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3760 sdeb_read_unlock(sip);
3761 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3762 return check_condition_result;
3763 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3764 sdeb_read_unlock(sip);
3765 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3766 return illegal_condition_result;
3772 ret = do_device_access(sip, scp, 0, lba, num, false);
3773 sdeb_read_unlock(sip);
3774 if (unlikely(ret == -1))
3775 return DID_ERROR << 16;
3777 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3779 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3780 atomic_read(&sdeb_inject_pending))) {
3781 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3782 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3783 atomic_set(&sdeb_inject_pending, 0);
3784 return check_condition_result;
3785 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3786 /* Logical block guard check failed */
3787 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3788 atomic_set(&sdeb_inject_pending, 0);
3789 return illegal_condition_result;
3790 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3791 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3792 atomic_set(&sdeb_inject_pending, 0);
3793 return illegal_condition_result;
3799 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3800 unsigned int sectors, u32 ei_lba)
3803 struct t10_pi_tuple *sdt;
3805 sector_t sector = start_sec;
3808 struct sg_mapping_iter diter;
3809 struct sg_mapping_iter piter;
3811 BUG_ON(scsi_sg_count(SCpnt) == 0);
3812 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3814 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3815 scsi_prot_sg_count(SCpnt),
3816 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3817 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3818 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3820 /* For each protection page */
3821 while (sg_miter_next(&piter)) {
3823 if (WARN_ON(!sg_miter_next(&diter))) {
3828 for (ppage_offset = 0; ppage_offset < piter.length;
3829 ppage_offset += sizeof(struct t10_pi_tuple)) {
3830 /* If we're at the end of the current
3831 * data page advance to the next one
3833 if (dpage_offset >= diter.length) {
3834 if (WARN_ON(!sg_miter_next(&diter))) {
3841 sdt = piter.addr + ppage_offset;
3842 daddr = diter.addr + dpage_offset;
3844 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3845 ret = dif_verify(sdt, daddr, sector, ei_lba);
3852 dpage_offset += sdebug_sector_size;
3854 diter.consumed = dpage_offset;
3855 sg_miter_stop(&diter);
3857 sg_miter_stop(&piter);
3859 dif_copy_prot(SCpnt, start_sec, sectors, false);
3866 sg_miter_stop(&diter);
3867 sg_miter_stop(&piter);
3871 static unsigned long lba_to_map_index(sector_t lba)
3873 if (sdebug_unmap_alignment)
3874 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3875 sector_div(lba, sdebug_unmap_granularity);
3879 static sector_t map_index_to_lba(unsigned long index)
3881 sector_t lba = index * sdebug_unmap_granularity;
3883 if (sdebug_unmap_alignment)
3884 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
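/*
 * Worked example: with sdebug_unmap_granularity=8 and
 * sdebug_unmap_alignment=4, lba_to_map_index(4) = (4 + 4) / 8 = 1
 * and map_index_to_lba(1) = 8 - 4 = 4, i.e. map bit n covers LBAs
 * [8n - 4, 8n + 4).
 */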
3888 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3892 unsigned int mapped;
3893 unsigned long index;
3896 index = lba_to_map_index(lba);
3897 mapped = test_bit(index, sip->map_storep);
3900 next = find_next_zero_bit(sip->map_storep, map_size, index);
3902 next = find_next_bit(sip->map_storep, map_size, index);
3904 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3909 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3912 sector_t end = lba + len;
3915 unsigned long index = lba_to_map_index(lba);
3917 if (index < map_size)
3918 set_bit(index, sip->map_storep);
3920 lba = map_index_to_lba(index + 1);
3924 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3927 sector_t end = lba + len;
3928 u8 *fsp = sip->storep;
3931 unsigned long index = lba_to_map_index(lba);
3933 if (lba == map_index_to_lba(index) &&
3934 lba + sdebug_unmap_granularity <= end &&
3936 clear_bit(index, sip->map_storep);
3937 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3938 memset(fsp + lba * sdebug_sector_size,
3939 (sdebug_lbprz & 1) ? 0 : 0xff,
3940 sdebug_sector_size *
3941 sdebug_unmap_granularity);
3943 if (sip->dif_storep) {
3944 memset(sip->dif_storep + lba, 0xff,
3945 sizeof(*sip->dif_storep) *
3946 sdebug_unmap_granularity);
3949 lba = map_index_to_lba(index + 1);
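/*
 * Note: per the lbprz logic above, an odd lbprz zero-fills unmapped
 * blocks (LBPRZ=1) while other settings fill with 0xff, matching the
 * "for LBPRZ=2 return 0xff_s" comment; any protection information
 * for the range is invalidated with all-0xff tuples.
 */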
3953 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3960 struct sdeb_store_info *sip = devip2sip(devip, true);
3961 u8 *cmd = scp->cmnd;
3966 lba = get_unaligned_be64(cmd + 2);
3967 num = get_unaligned_be32(cmd + 10);
3972 lba = get_unaligned_be32(cmd + 2);
3973 num = get_unaligned_be16(cmd + 7);
3978 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3979 (u32)(cmd[1] & 0x1f) << 16;
3980 num = (0 == cmd[4]) ? 256 : cmd[4];
3985 lba = get_unaligned_be32(cmd + 2);
3986 num = get_unaligned_be32(cmd + 6);
3989 case 0x53: /* XDWRITEREAD(10) */
3991 lba = get_unaligned_be32(cmd + 2);
3992 num = get_unaligned_be16(cmd + 7);
3995 default: /* assume WRITE(32) */
3996 lba = get_unaligned_be64(cmd + 12);
3997 ei_lba = get_unaligned_be32(cmd + 20);
3998 num = get_unaligned_be32(cmd + 28);
4002 if (unlikely(have_dif_prot && check_prot)) {
4003 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4005 mk_sense_invalid_opcode(scp);
4006 return check_condition_result;
4008 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4009 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4010 (cmd[1] & 0xe0) == 0)
4011 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4015 sdeb_write_lock(sip);
4016 ret = check_device_access_params(scp, lba, num, true);
4018 sdeb_write_unlock(sip);
4023 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4024 switch (prot_verify_write(scp, lba, num, ei_lba)) {
4025 case 1: /* Guard tag error */
4026 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4027 sdeb_write_unlock(sip);
4028 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4029 return illegal_condition_result;
4030 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4031 sdeb_write_unlock(sip);
4032 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4033 return check_condition_result;
4036 case 3: /* Reference tag error */
4037 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4038 sdeb_write_unlock(sip);
4039 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4040 return illegal_condition_result;
4041 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4042 sdeb_write_unlock(sip);
4043 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4044 return check_condition_result;
4050 ret = do_device_access(sip, scp, 0, lba, num, true);
4051 if (unlikely(scsi_debug_lbp()))
4052 map_region(sip, lba, num);
4053 /* If ZBC zone then bump its write pointer */
4054 if (sdebug_dev_is_zoned(devip))
4055 zbc_inc_wp(devip, lba, num);
4056 sdeb_write_unlock(sip);
4057 if (unlikely(-1 == ret))
4058 return DID_ERROR << 16;
4059 else if (unlikely(sdebug_verbose &&
4060 (ret < (num * sdebug_sector_size))))
4061 sdev_printk(KERN_INFO, scp->device,
4062 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4063 my_name, num * sdebug_sector_size, ret);
4065 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4066 atomic_read(&sdeb_inject_pending))) {
4067 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4068 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4069 atomic_set(&sdeb_inject_pending, 0);
4070 return check_condition_result;
4071 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4072 /* Logical block guard check failed */
4073 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4074 atomic_set(&sdeb_inject_pending, 0);
4075 return illegal_condition_result;
4076 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4077 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4078 atomic_set(&sdeb_inject_pending, 0);
4079 return illegal_condition_result;
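/*
 * The SDEBUG_OPT_RECOV_DIF_DIX block above is the error-injection tail
 * shared by the write paths: once the every_nth machinery has set
 * sdeb_inject_pending, one of the RECOVERED_ERR / DIF_ERR / DIX_ERR
 * options turns an otherwise successful write into the matching sense
 * data, and the pending flag is consumed (set back to 0).
 */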
4086 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4087 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
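/*
 * Sketch of one LBA range descriptor (lrd) as parsed below; layout
 * inferred from the get_unaligned_be*() offsets, 32 bytes per entry:
 *   bytes  0..7   logical block address (big endian)
 *   bytes  8..11  number of logical blocks
 *   bytes 12..15  expected initial LBA tag (32 byte cdb variant only)
 * The descriptors start one lrd_size past the parameter list header.
 */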
4089 static int resp_write_scat(struct scsi_cmnd *scp,
4090 struct sdebug_dev_info *devip)
4092 u8 *cmd = scp->cmnd;
4095 struct sdeb_store_info *sip = devip2sip(devip, true);
4097 u16 lbdof, num_lrd, k;
4098 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
4099 u32 lb_size = sdebug_sector_size;
4104 static const u32 lrd_size = 32; /* + parameter list header size */
4106 if (cmd[0] == VARIABLE_LENGTH_CMD) {
4108 wrprotect = (cmd[10] >> 5) & 0x7;
4109 lbdof = get_unaligned_be16(cmd + 12);
4110 num_lrd = get_unaligned_be16(cmd + 16);
4111 bt_len = get_unaligned_be32(cmd + 28);
4112 } else { /* that leaves WRITE SCATTERED(16) */
4114 wrprotect = (cmd[2] >> 5) & 0x7;
4115 lbdof = get_unaligned_be16(cmd + 4);
4116 num_lrd = get_unaligned_be16(cmd + 8);
4117 bt_len = get_unaligned_be32(cmd + 10);
4118 if (unlikely(have_dif_prot)) {
4119 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4121 mk_sense_invalid_opcode(scp);
4122 return illegal_condition_result;
4124 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4125 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4127 sdev_printk(KERN_ERR, scp->device,
4128 "Unprotected WR to DIF device\n");
4131 if ((num_lrd == 0) || (bt_len == 0))
4132 return 0; /* T10 says these do-nothings are not errors */
4135 sdev_printk(KERN_INFO, scp->device,
4136 "%s: %s: LB Data Offset field bad\n",
4138 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4139 return illegal_condition_result;
4141 lbdof_blen = lbdof * lb_size;
4142 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
4144 sdev_printk(KERN_INFO, scp->device,
4145 "%s: %s: LBA range descriptors don't fit\n",
4147 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4148 return illegal_condition_result;
4150 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
4152 return SCSI_MLQUEUE_HOST_BUSY;
4154 sdev_printk(KERN_INFO, scp->device,
4155 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
4156 my_name, __func__, lbdof_blen);
4157 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
4159 ret = DID_ERROR << 16;
4163 sdeb_write_lock(sip);
4164 sg_off = lbdof_blen;
4165 /* Spec says the Buffer Transfer Length field counts LBs in the data-out */
4167 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
4168 lba = get_unaligned_be64(up + 0);
4169 num = get_unaligned_be32(up + 8);
4171 sdev_printk(KERN_INFO, scp->device,
4172 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
4173 my_name, __func__, k, lba, num, sg_off);
4176 ret = check_device_access_params(scp, lba, num, true);
4178 goto err_out_unlock;
4179 num_by = num * lb_size;
4180 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
4182 if ((cum_lb + num) > bt_len) {
4184 sdev_printk(KERN_INFO, scp->device,
4185 "%s: %s: sum of blocks > data provided\n",
4187 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
4189 ret = illegal_condition_result;
4190 goto err_out_unlock;
4194 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4195 int prot_ret = prot_verify_write(scp, lba, num,
4199 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
4201 ret = illegal_condition_result;
4202 goto err_out_unlock;
4206 ret = do_device_access(sip, scp, sg_off, lba, num, true);
4207 /* If ZBC zone then bump its write pointer */
4208 if (sdebug_dev_is_zoned(devip))
4209 zbc_inc_wp(devip, lba, num);
4210 if (unlikely(scsi_debug_lbp()))
4211 map_region(sip, lba, num);
4212 if (unlikely(-1 == ret)) {
4213 ret = DID_ERROR << 16;
4214 goto err_out_unlock;
4215 } else if (unlikely(sdebug_verbose && (ret < num_by)))
4216 sdev_printk(KERN_INFO, scp->device,
4217 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
4218 my_name, num_by, ret);
4220 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4221 atomic_read(&sdeb_inject_pending))) {
4222 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4223 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4224 atomic_set(&sdeb_inject_pending, 0);
4225 ret = check_condition_result;
4226 goto err_out_unlock;
4227 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4228 /* Logical block guard check failed */
4229 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4230 atomic_set(&sdeb_inject_pending, 0);
4231 ret = illegal_condition_result;
4232 goto err_out_unlock;
4233 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
4234 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4235 atomic_set(&sdeb_inject_pending, 0);
4236 ret = illegal_condition_result;
4237 goto err_out_unlock;
4245 sdeb_write_unlock(sip);
4251 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4252 u32 ei_lba, bool unmap, bool ndob)
4254 struct scsi_device *sdp = scp->device;
4255 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
4256 unsigned long long i;
4258 u32 lb_size = sdebug_sector_size;
4260 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4261 scp->device->hostdata, true);
4265 sdeb_write_lock(sip);
4267 ret = check_device_access_params(scp, lba, num, true);
4269 sdeb_write_unlock(sip);
4273 if (unmap && scsi_debug_lbp()) {
4274 unmap_region(sip, lba, num);
4278 block = do_div(lbaa, sdebug_store_sectors);
4279 /* if ndob then zero 1 logical block, else fetch 1 logical block */
4281 fs1p = fsp + (block * lb_size);
4283 memset(fs1p, 0, lb_size);
4286 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
4289 sdeb_write_unlock(sip);
4290 return DID_ERROR << 16;
4291 } else if (sdebug_verbose && !ndob && (ret < lb_size))
4292 sdev_printk(KERN_INFO, scp->device,
4293 "%s: %s: lb size=%u, IO sent=%d bytes\n",
4294 my_name, "write same", lb_size, ret);
4296 /* Copy the first logical block to the remaining blocks */
4297 for (i = 1 ; i < num ; i++) {
4299 block = do_div(lbaa, sdebug_store_sectors);
4300 memmove(fsp + (block * lb_size), fs1p, lb_size);
4302 if (scsi_debug_lbp())
4303 map_region(sip, lba, num);
4304 /* If ZBC zone then bump its write pointer */
4305 if (sdebug_dev_is_zoned(devip))
4306 zbc_inc_wp(devip, lba, num);
4308 sdeb_write_unlock(sip);
4313 static int resp_write_same_10(struct scsi_cmnd *scp,
4314 struct sdebug_dev_info *devip)
4316 u8 *cmd = scp->cmnd;
4323 if (sdebug_lbpws10 == 0) {
4324 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4325 return check_condition_result;
4329 lba = get_unaligned_be32(cmd + 2);
4330 num = get_unaligned_be16(cmd + 7);
4331 if (num > sdebug_write_same_length) {
4332 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4333 return check_condition_result;
4335 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4338 static int resp_write_same_16(struct scsi_cmnd *scp,
4339 struct sdebug_dev_info *devip)
4341 u8 *cmd = scp->cmnd;
4348 if (cmd[1] & 0x8) { /* UNMAP */
4349 if (sdebug_lbpws == 0) {
4350 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4351 return check_condition_result;
4355 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
4357 lba = get_unaligned_be64(cmd + 2);
4358 num = get_unaligned_be32(cmd + 10);
4359 if (num > sdebug_write_same_length) {
4360 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4361 return check_condition_result;
4363 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4366 /* Note the mode field is in the same position as the (lower) service action
4367 * field. For the Report supported operation codes command, SPC-4 suggests
4368 * each mode of this command should be reported separately; left for the future. */
4369 static int resp_write_buffer(struct scsi_cmnd *scp,
4370 struct sdebug_dev_info *devip)
4372 u8 *cmd = scp->cmnd;
4373 struct scsi_device *sdp = scp->device;
4374 struct sdebug_dev_info *dp;
4377 mode = cmd[1] & 0x1f;
4379 case 0x4: /* download microcode (MC) and activate (ACT) */
4380 /* set UAs on this device only */
4381 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4382 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4384 case 0x5: /* download MC, save and ACT */
4385 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4387 case 0x6: /* download MC with offsets and ACT */
4388 /* set UAs on most devices (LUs) in this target */
4389 list_for_each_entry(dp,
4390 &devip->sdbg_host->dev_info_list,
4392 if (dp->target == sdp->id) {
4393 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4395 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4399 case 0x7: /* download MC with offsets, save, and ACT */
4400 /* set UA on all devices (LUs) in this target */
4401 list_for_each_entry(dp,
4402 &devip->sdbg_host->dev_info_list,
4404 if (dp->target == sdp->id)
4405 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4409 /* do nothing for this command for other mode values */
4415 static int resp_comp_write(struct scsi_cmnd *scp,
4416 struct sdebug_dev_info *devip)
4418 u8 *cmd = scp->cmnd;
4420 struct sdeb_store_info *sip = devip2sip(devip, true);
4423 u32 lb_size = sdebug_sector_size;
4428 lba = get_unaligned_be64(cmd + 2);
4429 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4431 return 0; /* degenerate case, not an error */
4432 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4434 mk_sense_invalid_opcode(scp);
4435 return check_condition_result;
4437 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4438 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4439 (cmd[1] & 0xe0) == 0)
4440 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4442 ret = check_device_access_params(scp, lba, num, false);
4446 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4448 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4450 return check_condition_result;
4453 sdeb_write_lock(sip);
4455 ret = do_dout_fetch(scp, dnum, arr);
4457 retval = DID_ERROR << 16;
4459 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4460 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4461 "indicated=%u, IO sent=%d bytes\n", my_name,
4462 dnum * lb_size, ret);
4463 if (!comp_write_worker(sip, lba, num, arr, false)) {
4464 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4465 retval = check_condition_result;
4468 if (scsi_debug_lbp())
4469 map_region(sip, lba, num);
4471 sdeb_write_unlock(sip);
4476 struct unmap_block_desc {
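/* fields inferred from resp_unmap() below: 16 bytes per descriptor */
__be64 lba;
__be32 blocks;
__be32 __reserved;
};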
4482 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4485 struct unmap_block_desc *desc;
4486 struct sdeb_store_info *sip = devip2sip(devip, true);
4487 unsigned int i, payload_len, descriptors;
4490 if (!scsi_debug_lbp())
4491 return 0; /* fib and say it's done */
4492 payload_len = get_unaligned_be16(scp->cmnd + 7);
4493 BUG_ON(scsi_bufflen(scp) != payload_len);
4495 descriptors = (payload_len - 8) / 16;
4496 if (descriptors > sdebug_unmap_max_desc) {
4497 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4498 return check_condition_result;
4501 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4503 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4505 return check_condition_result;
4508 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4510 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4511 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4513 desc = (void *)&buf[8];
4515 sdeb_write_lock(sip);
4517 for (i = 0 ; i < descriptors ; i++) {
4518 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4519 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4521 ret = check_device_access_params(scp, lba, num, true);
4525 unmap_region(sip, lba, num);
4531 sdeb_write_unlock(sip);
4537 #define SDEBUG_GET_LBA_STATUS_LEN 32
4539 static int resp_get_lba_status(struct scsi_cmnd *scp,
4540 struct sdebug_dev_info *devip)
4542 u8 *cmd = scp->cmnd;
4544 u32 alloc_len, mapped, num;
4546 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4548 lba = get_unaligned_be64(cmd + 2);
4549 alloc_len = get_unaligned_be32(cmd + 10);
4554 ret = check_device_access_params(scp, lba, 1, false);
4558 if (scsi_debug_lbp()) {
4559 struct sdeb_store_info *sip = devip2sip(devip, true);
4561 mapped = map_state(sip, lba, &num);
4564 /* following just in case virtual_gb changed */
4565 sdebug_capacity = get_sdebug_capacity();
4566 if (sdebug_capacity - lba <= 0xffffffff)
4567 num = sdebug_capacity - lba;
4572 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4573 put_unaligned_be32(20, arr); /* Parameter Data Length */
4574 put_unaligned_be64(lba, arr + 8); /* LBA */
4575 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4576 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4578 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4581 static int resp_get_stream_status(struct scsi_cmnd *scp,
4582 struct sdebug_dev_info *devip)
4584 u16 starting_stream_id, stream_id;
4585 const u8 *cmd = scp->cmnd;
4586 u32 alloc_len, offset;
4588 struct scsi_stream_status_header *h = (void *)arr;
4590 starting_stream_id = get_unaligned_be16(cmd + 4);
4591 alloc_len = get_unaligned_be32(cmd + 10);
4593 if (alloc_len < 8) {
4594 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4595 return check_condition_result;
4598 if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4599 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4600 return check_condition_result;
4604 * The GET STREAM STATUS command only reports status information
4605 * about open streams. Treat the non-permanent streams as open.
4607 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4608 &h->number_of_open_streams);
4610 for (offset = 8, stream_id = starting_stream_id;
4611 offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4612 stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4613 offset += 8, stream_id++) {
4614 struct scsi_stream_status *stream_status = (void *)arr + offset;
4616 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4617 put_unaligned_be16(stream_id,
4618 &stream_status->stream_identifier);
4619 stream_status->rel_lifetime = stream_id + 1;
4621 put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4623 return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4626 static int resp_sync_cache(struct scsi_cmnd *scp,
4627 struct sdebug_dev_info *devip)
4632 u8 *cmd = scp->cmnd;
4634 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4635 lba = get_unaligned_be32(cmd + 2);
4636 num_blocks = get_unaligned_be16(cmd + 7);
4637 } else { /* SYNCHRONIZE_CACHE(16) */
4638 lba = get_unaligned_be64(cmd + 2);
4639 num_blocks = get_unaligned_be32(cmd + 10);
4641 if (lba + num_blocks > sdebug_capacity) {
4642 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4643 return check_condition_result;
4645 if (!write_since_sync || (cmd[1] & 0x2))
4646 res = SDEG_RES_IMMED_MASK;
4647 else /* delay if write_since_sync and IMMED clear */
4648 write_since_sync = false;
4653 * Assuming lba+num_blocks is not out-of-range, this function returns
4654 * CONDITION MET if the specified blocks will fit (or already sit) in the
4655 * cache, and GOOD status otherwise. We model a disk with a big cache, so
4656 * we always yield CONDITION MET, and actually try to bring the range of
4657 * main memory into the cache associated with the CPU(s).
4659 static int resp_pre_fetch(struct scsi_cmnd *scp,
4660 struct sdebug_dev_info *devip)
4664 u64 block, rest = 0;
4666 u8 *cmd = scp->cmnd;
4667 struct sdeb_store_info *sip = devip2sip(devip, true);
4668 u8 *fsp = sip->storep;
4670 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4671 lba = get_unaligned_be32(cmd + 2);
4672 nblks = get_unaligned_be16(cmd + 7);
4673 } else { /* PRE-FETCH(16) */
4674 lba = get_unaligned_be64(cmd + 2);
4675 nblks = get_unaligned_be32(cmd + 10);
4677 if (lba + nblks > sdebug_capacity) {
4678 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4679 return check_condition_result;
4683 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4684 block = do_div(lba, sdebug_store_sectors);
4685 if (block + nblks > sdebug_store_sectors)
4686 rest = block + nblks - sdebug_store_sectors;
4688 /* Try to bring the PRE-FETCH range into CPU's cache */
4689 sdeb_read_lock(sip);
4690 prefetch_range(fsp + (sdebug_sector_size * block),
4691 (nblks - rest) * sdebug_sector_size);
4693 prefetch_range(fsp, rest * sdebug_sector_size);
4694 sdeb_read_unlock(sip);
4697 res = SDEG_RES_IMMED_MASK;
4698 return res | condition_met_result;
4701 #define RL_BUCKET_ELEMS 8
4703 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4704 * (W-LUN), the normal Linux scanning logic does not associate it with a
4705 * device (e.g. /dev/sg7). The following magic will make that association:
4706 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4707 * where <n> is a host number. If there are multiple targets in a host then
4708 * the above will associate a W-LUN to each target. To only get a W-LUN
4709 * for target 2, then use "echo '- 2 49409' > scan" .
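/*
 * Response assembly note: arr holds RL_BUCKET_ELEMS (8) LUN entries at a
 * time and p_fill_from_dev_buffer() appends each bucket at off_rsp. The
 * 8-byte REPORT LUNS header is the same size as one LUN entry, so the
 * first bucket simply gives up one slot to it.
 */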
4711 static int resp_report_luns(struct scsi_cmnd *scp,
4712 struct sdebug_dev_info *devip)
4714 unsigned char *cmd = scp->cmnd;
4715 unsigned int alloc_len;
4716 unsigned char select_report;
4718 struct scsi_lun *lun_p;
4719 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4720 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4721 unsigned int wlun_cnt; /* report luns W-LUN count */
4722 unsigned int tlun_cnt; /* total LUN count */
4723 unsigned int rlen; /* response length (in bytes) */
4725 unsigned int off_rsp = 0;
4726 const int sz_lun = sizeof(struct scsi_lun);
4728 clear_luns_changed_on_target(devip);
4730 select_report = cmd[2];
4731 alloc_len = get_unaligned_be32(cmd + 6);
4733 if (alloc_len < 4) {
4734 pr_err("alloc len too small %d\n", alloc_len);
4735 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4736 return check_condition_result;
4739 switch (select_report) {
4740 case 0: /* all LUNs apart from W-LUNs */
4741 lun_cnt = sdebug_max_luns;
4744 case 1: /* only W-LUNs */
4748 case 2: /* all LUNs */
4749 lun_cnt = sdebug_max_luns;
4752 case 0x10: /* only administrative LUs */
4753 case 0x11: /* see SPC-5 */
4754 case 0x12: /* only subsidiary LUs owned by referenced LU */
4756 pr_debug("select report invalid %d\n", select_report);
4757 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4758 return check_condition_result;
4761 if (sdebug_no_lun_0 && (lun_cnt > 0))
4764 tlun_cnt = lun_cnt + wlun_cnt;
4765 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4766 scsi_set_resid(scp, scsi_bufflen(scp));
4767 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4768 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4770 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4771 lun = sdebug_no_lun_0 ? 1 : 0;
4772 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4773 memset(arr, 0, sizeof(arr));
4774 lun_p = (struct scsi_lun *)&arr[0];
4776 put_unaligned_be32(rlen, &arr[0]);
4780 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4781 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4783 int_to_scsilun(lun++, lun_p);
4784 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4785 lun_p->scsi_lun[0] |= 0x40;
4787 if (j < RL_BUCKET_ELEMS)
4790 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4796 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4800 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4804 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4806 bool is_bytchk3 = false;
4809 u32 vnum, a_num, off;
4810 const u32 lb_size = sdebug_sector_size;
4813 u8 *cmd = scp->cmnd;
4814 struct sdeb_store_info *sip = devip2sip(devip, true);
4816 bytchk = (cmd[1] >> 1) & 0x3;
4818 return 0; /* always claim internal verify okay */
4819 } else if (bytchk == 2) {
4820 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4821 return check_condition_result;
4822 } else if (bytchk == 3) {
4823 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4827 lba = get_unaligned_be64(cmd + 2);
4828 vnum = get_unaligned_be32(cmd + 10);
4830 case VERIFY: /* is VERIFY(10) */
4831 lba = get_unaligned_be32(cmd + 2);
4832 vnum = get_unaligned_be16(cmd + 7);
4835 mk_sense_invalid_opcode(scp);
4836 return check_condition_result;
4839 return 0; /* not an error */
4840 a_num = is_bytchk3 ? 1 : vnum;
4841 /* Treat following check like one for read (i.e. no write) access */
4842 ret = check_device_access_params(scp, lba, a_num, false);
4846 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4848 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4850 return check_condition_result;
4852 /* Not changing store, so only need read access */
4853 sdeb_read_lock(sip);
4855 ret = do_dout_fetch(scp, a_num, arr);
4857 ret = DID_ERROR << 16;
4859 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4860 sdev_printk(KERN_INFO, scp->device,
4861 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4862 my_name, __func__, a_num * lb_size, ret);
4865 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4866 memcpy(arr + off, arr, lb_size);
4869 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4870 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4871 ret = check_condition_result;
4875 sdeb_read_unlock(sip);
4880 #define RZONES_DESC_HD 64
4882 /* Report zones depending on start LBA and reporting options */
4883 static int resp_report_zones(struct scsi_cmnd *scp,
4884 struct sdebug_dev_info *devip)
4886 unsigned int rep_max_zones, nrz = 0;
4888 u32 alloc_len, rep_opts, rep_len;
4891 u8 *arr = NULL, *desc;
4892 u8 *cmd = scp->cmnd;
4893 struct sdeb_zone_state *zsp = NULL;
4894 struct sdeb_store_info *sip = devip2sip(devip, false);
4896 if (!sdebug_dev_is_zoned(devip)) {
4897 mk_sense_invalid_opcode(scp);
4898 return check_condition_result;
4900 zs_lba = get_unaligned_be64(cmd + 2);
4901 alloc_len = get_unaligned_be32(cmd + 10);
4903 return 0; /* not an error */
4904 rep_opts = cmd[14] & 0x3f;
4905 partial = cmd[14] & 0x80;
4907 if (zs_lba >= sdebug_capacity) {
4908 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4909 return check_condition_result;
4912 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
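/* e.g. alloc_len=4096: (4096 - 64) / 64 = 63 zone descriptors fit after the 64-byte header */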
4914 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4916 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4918 return check_condition_result;
4921 sdeb_read_lock(sip);
4924 for (lba = zs_lba; lba < sdebug_capacity;
4925 lba = zsp->z_start + zsp->z_size) {
4926 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4928 zsp = zbc_zone(devip, lba);
4935 if (zsp->z_cond != ZC1_EMPTY)
4939 /* Implicit open zones */
4940 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4944 /* Explicit open zones */
4945 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4950 if (zsp->z_cond != ZC4_CLOSED)
4955 if (zsp->z_cond != ZC5_FULL)
4962 * Read-only, offline, reset WP recommended are
4963 * not emulated: no zones to report;
4967 /* non-seq-resource set */
4968 if (!zsp->z_non_seq_resource)
4972 /* All zones except gap zones. */
4973 if (zbc_zone_is_gap(zsp))
4977 /* Not write pointer (conventional) zones */
4978 if (zbc_zone_is_seq(zsp))
4982 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4983 INVALID_FIELD_IN_CDB, 0);
4984 ret = check_condition_result;
4988 if (nrz < rep_max_zones) {
4989 /* Fill zone descriptor */
4990 desc[0] = zsp->z_type;
4991 desc[1] = zsp->z_cond << 4;
4992 if (zsp->z_non_seq_resource)
4994 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4995 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4996 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5000 if (partial && nrz >= rep_max_zones)
5007 /* Zone list length. */
5008 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5010 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5011 /* Zone starting LBA granularity. */
5012 if (devip->zcap < devip->zsize)
5013 put_unaligned_be64(devip->zsize, arr + 16);
5015 rep_len = (unsigned long)desc - (unsigned long)arr;
5016 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5019 sdeb_read_unlock(sip);
5024 /* Logic transplanted from tcmu-runner, file_zbc.c */
5025 static void zbc_open_all(struct sdebug_dev_info *devip)
5027 struct sdeb_zone_state *zsp = &devip->zstate[0];
5030 for (i = 0; i < devip->nr_zones; i++, zsp++) {
5031 if (zsp->z_cond == ZC4_CLOSED)
5032 zbc_open_zone(devip, &devip->zstate[i], true);
5036 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5040 enum sdebug_z_cond zc;
5041 u8 *cmd = scp->cmnd;
5042 struct sdeb_zone_state *zsp;
5043 bool all = cmd[14] & 0x01;
5044 struct sdeb_store_info *sip = devip2sip(devip, false);
5046 if (!sdebug_dev_is_zoned(devip)) {
5047 mk_sense_invalid_opcode(scp);
5048 return check_condition_result;
5051 sdeb_write_lock(sip);
5054 /* Check if all closed zones can be open */
5055 if (devip->max_open &&
5056 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
5057 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5059 res = check_condition_result;
5062 /* Open all closed zones */
5063 zbc_open_all(devip);
5067 /* Open the specified zone */
5068 z_id = get_unaligned_be64(cmd + 2);
5069 if (z_id >= sdebug_capacity) {
5070 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5071 res = check_condition_result;
5075 zsp = zbc_zone(devip, z_id);
5076 if (z_id != zsp->z_start) {
5077 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5078 res = check_condition_result;
5081 if (zbc_zone_is_conv(zsp)) {
5082 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5083 res = check_condition_result;
5088 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
5091 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
5092 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
5094 res = check_condition_result;
5098 zbc_open_zone(devip, zsp, true);
5100 sdeb_write_unlock(sip);
5104 static void zbc_close_all(struct sdebug_dev_info *devip)
5108 for (i = 0; i < devip->nr_zones; i++)
5109 zbc_close_zone(devip, &devip->zstate[i]);
5112 static int resp_close_zone(struct scsi_cmnd *scp,
5113 struct sdebug_dev_info *devip)
5117 u8 *cmd = scp->cmnd;
5118 struct sdeb_zone_state *zsp;
5119 bool all = cmd[14] & 0x01;
5120 struct sdeb_store_info *sip = devip2sip(devip, false);
5122 if (!sdebug_dev_is_zoned(devip)) {
5123 mk_sense_invalid_opcode(scp);
5124 return check_condition_result;
5127 sdeb_write_lock(sip);
5130 zbc_close_all(devip);
5134 /* Close specified zone */
5135 z_id = get_unaligned_be64(cmd + 2);
5136 if (z_id >= sdebug_capacity) {
5137 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5138 res = check_condition_result;
5142 zsp = zbc_zone(devip, z_id);
5143 if (z_id != zsp->z_start) {
5144 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5145 res = check_condition_result;
5148 if (zbc_zone_is_conv(zsp)) {
5149 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5150 res = check_condition_result;
5154 zbc_close_zone(devip, zsp);
5156 sdeb_write_unlock(sip);
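/*
 * The helpers below walk the ZBC condition machine: EMPTY ->
 * (IMPLICIT|EXPLICIT) OPEN -> CLOSED -> FULL. FINISH ZONE closes an open
 * zone if needed, then moves the write pointer to the end of the zone
 * and marks it FULL; RESET WRITE POINTER zeroes the written span,
 * returns the write pointer to z_start and marks the zone EMPTY again.
 */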
5160 static void zbc_finish_zone(struct sdebug_dev_info *devip,
5161 struct sdeb_zone_state *zsp, bool empty)
5163 enum sdebug_z_cond zc = zsp->z_cond;
5165 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
5166 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
5167 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5168 zbc_close_zone(devip, zsp);
5169 if (zsp->z_cond == ZC4_CLOSED)
5171 zsp->z_wp = zsp->z_start + zsp->z_size;
5172 zsp->z_cond = ZC5_FULL;
5176 static void zbc_finish_all(struct sdebug_dev_info *devip)
5180 for (i = 0; i < devip->nr_zones; i++)
5181 zbc_finish_zone(devip, &devip->zstate[i], false);
5184 static int resp_finish_zone(struct scsi_cmnd *scp,
5185 struct sdebug_dev_info *devip)
5187 struct sdeb_zone_state *zsp;
5190 u8 *cmd = scp->cmnd;
5191 bool all = cmd[14] & 0x01;
5192 struct sdeb_store_info *sip = devip2sip(devip, false);
5194 if (!sdebug_dev_is_zoned(devip)) {
5195 mk_sense_invalid_opcode(scp);
5196 return check_condition_result;
5199 sdeb_write_lock(sip);
5202 zbc_finish_all(devip);
5206 /* Finish the specified zone */
5207 z_id = get_unaligned_be64(cmd + 2);
5208 if (z_id >= sdebug_capacity) {
5209 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5210 res = check_condition_result;
5214 zsp = zbc_zone(devip, z_id);
5215 if (z_id != zsp->z_start) {
5216 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5217 res = check_condition_result;
5220 if (zbc_zone_is_conv(zsp)) {
5221 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5222 res = check_condition_result;
5226 zbc_finish_zone(devip, zsp, true);
5228 sdeb_write_unlock(sip);
5232 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
5233 struct sdeb_zone_state *zsp)
5235 enum sdebug_z_cond zc;
5236 struct sdeb_store_info *sip = devip2sip(devip, false);
5238 if (!zbc_zone_is_seq(zsp))
5242 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
5243 zbc_close_zone(devip, zsp);
5245 if (zsp->z_cond == ZC4_CLOSED)
5248 if (zsp->z_wp > zsp->z_start)
5249 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
5250 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
5252 zsp->z_non_seq_resource = false;
5253 zsp->z_wp = zsp->z_start;
5254 zsp->z_cond = ZC1_EMPTY;
5257 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5261 for (i = 0; i < devip->nr_zones; i++)
5262 zbc_rwp_zone(devip, &devip->zstate[i]);
5265 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5267 struct sdeb_zone_state *zsp;
5270 u8 *cmd = scp->cmnd;
5271 bool all = cmd[14] & 0x01;
5272 struct sdeb_store_info *sip = devip2sip(devip, false);
5274 if (!sdebug_dev_is_zoned(devip)) {
5275 mk_sense_invalid_opcode(scp);
5276 return check_condition_result;
5279 sdeb_write_lock(sip);
5286 z_id = get_unaligned_be64(cmd + 2);
5287 if (z_id >= sdebug_capacity) {
5288 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5289 res = check_condition_result;
5293 zsp = zbc_zone(devip, z_id);
5294 if (z_id != zsp->z_start) {
5295 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5296 res = check_condition_result;
5299 if (zbc_zone_is_conv(zsp)) {
5300 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5301 res = check_condition_result;
5305 zbc_rwp_zone(devip, zsp);
5307 sdeb_write_unlock(sip);
5311 static u32 get_tag(struct scsi_cmnd *cmnd)
5313 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5316 /* Queued (deferred) command completions converge here. */
5317 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
5319 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
5320 unsigned long flags;
5321 struct scsi_cmnd *scp = sqcp->scmd;
5322 struct sdebug_scsi_cmd *sdsc;
5325 if (sdebug_statistics) {
5326 atomic_inc(&sdebug_completions);
5327 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
5328 atomic_inc(&sdebug_miss_cpus);
5332 pr_err("scmd=NULL\n");
5336 sdsc = scsi_cmd_priv(scp);
5337 spin_lock_irqsave(&sdsc->lock, flags);
5338 aborted = sd_dp->aborted;
5339 if (unlikely(aborted))
5340 sd_dp->aborted = false;
5341 ASSIGN_QUEUED_CMD(scp, NULL);
5343 spin_unlock_irqrestore(&sdsc->lock, flags);
5346 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
5347 blk_abort_request(scsi_cmd_to_rq(scp));
5351 scsi_done(scp); /* callback to mid level */
5353 sdebug_free_queued_cmd(sqcp);
5356 /* When high resolution timer goes off this function is called. */
5357 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5359 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5361 sdebug_q_cmd_complete(sd_dp);
5362 return HRTIMER_NORESTART;
5365 /* When work queue schedules work, it calls this function. */
5366 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5368 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5370 sdebug_q_cmd_complete(sd_dp);
5373 static bool got_shared_uuid;
5374 static uuid_t shared_uuid;
5376 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5378 struct sdeb_zone_state *zsp;
5379 sector_t capacity = get_sdebug_capacity();
5380 sector_t conv_capacity;
5381 sector_t zstart = 0;
5385 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5386 * a zone size allowing for at least 4 zones on the device. Otherwise,
5387 * use the specified zone size checking that at least 2 zones can be
5388 * created for the device.
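 *
 * Worked example, assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512-byte
 * sectors: zsize = 128 * 2^20 / 512 = 262144 sectors, which the loop
 * below then shrinks while fewer than four such zones fit the capacity.
 */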
5390 if (!sdeb_zbc_zone_size_mb) {
5391 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5392 >> ilog2(sdebug_sector_size);
5393 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5395 if (devip->zsize < 2) {
5396 pr_err("Device capacity too small\n");
5400 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5401 pr_err("Zone size is not a power of 2\n");
5404 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5405 >> ilog2(sdebug_sector_size);
5406 if (devip->zsize >= capacity) {
5407 pr_err("Zone size too large for device capacity\n");
5412 devip->zsize_shift = ilog2(devip->zsize);
5413 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5415 if (sdeb_zbc_zone_cap_mb == 0) {
5416 devip->zcap = devip->zsize;
5418 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5419 ilog2(sdebug_sector_size);
5420 if (devip->zcap > devip->zsize) {
5421 pr_err("Zone capacity too large\n");
5426 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5427 if (conv_capacity >= capacity) {
5428 pr_err("Number of conventional zones too large\n");
5431 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5432 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5434 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5436 /* Add gap zones if zone capacity is smaller than the zone size */
5437 if (devip->zcap < devip->zsize)
5438 devip->nr_zones += devip->nr_seq_zones;
5441 /* zbc_max_open_zones can be 0, meaning "not reported" */
5442 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5443 devip->max_open = (devip->nr_zones - 1) / 2;
5445 devip->max_open = sdeb_zbc_max_open;
5448 devip->zstate = kcalloc(devip->nr_zones,
5449 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5453 for (i = 0; i < devip->nr_zones; i++) {
5454 zsp = &devip->zstate[i];
5456 zsp->z_start = zstart;
5458 if (i < devip->nr_conv_zones) {
5459 zsp->z_type = ZBC_ZTYPE_CNV;
5460 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5461 zsp->z_wp = (sector_t)-1;
5463 min_t(u64, devip->zsize, capacity - zstart);
5464 } else if ((zstart & (devip->zsize - 1)) == 0) {
5466 zsp->z_type = ZBC_ZTYPE_SWR;
5468 zsp->z_type = ZBC_ZTYPE_SWP;
5469 zsp->z_cond = ZC1_EMPTY;
5470 zsp->z_wp = zsp->z_start;
5472 min_t(u64, devip->zcap, capacity - zstart);
5474 zsp->z_type = ZBC_ZTYPE_GAP;
5475 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5476 zsp->z_wp = (sector_t)-1;
5477 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5481 WARN_ON_ONCE((int)zsp->z_size <= 0);
5482 zstart += zsp->z_size;
5488 static struct sdebug_dev_info *sdebug_device_create(
5489 struct sdebug_host_info *sdbg_host, gfp_t flags)
5491 struct sdebug_dev_info *devip;
5493 devip = kzalloc(sizeof(*devip), flags);
5495 if (sdebug_uuid_ctl == 1)
5496 uuid_gen(&devip->lu_name);
5497 else if (sdebug_uuid_ctl == 2) {
5498 if (got_shared_uuid)
5499 devip->lu_name = shared_uuid;
5501 uuid_gen(&shared_uuid);
5502 got_shared_uuid = true;
5503 devip->lu_name = shared_uuid;
5506 devip->sdbg_host = sdbg_host;
5507 if (sdeb_zbc_in_use) {
5508 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
5509 if (sdebug_device_create_zones(devip)) {
5514 devip->zoned = false;
5516 devip->create_ts = ktime_get_boottime();
5517 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5518 spin_lock_init(&devip->list_lock);
5519 INIT_LIST_HEAD(&devip->inject_err_list);
5520 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5525 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5527 struct sdebug_host_info *sdbg_host;
5528 struct sdebug_dev_info *open_devip = NULL;
5529 struct sdebug_dev_info *devip;
5531 sdbg_host = shost_to_sdebug_host(sdev->host);
5533 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5534 if ((devip->used) && (devip->channel == sdev->channel) &&
5535 (devip->target == sdev->id) &&
5536 (devip->lun == sdev->lun))
5539 if ((!devip->used) && (!open_devip))
5543 if (!open_devip) { /* try and make a new one */
5544 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5546 pr_err("out of memory at line %d\n", __LINE__);
5551 open_devip->channel = sdev->channel;
5552 open_devip->target = sdev->id;
5553 open_devip->lun = sdev->lun;
5554 open_devip->sdbg_host = sdbg_host;
5555 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5556 open_devip->used = true;
5560 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5563 pr_info("slave_alloc <%u %u %u %llu>\n",
5564 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5569 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5571 struct sdebug_dev_info *devip =
5572 (struct sdebug_dev_info *)sdp->hostdata;
5573 struct dentry *dentry;
5576 pr_info("slave_configure <%u %u %u %llu>\n",
5577 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5578 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5579 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5580 if (devip == NULL) {
5581 devip = find_build_dev_info(sdp);
5583 return 1; /* no resources, will be marked offline */
5585 sdp->hostdata = devip;
5587 sdp->no_uld_attach = 1;
5588 config_cdb_len(sdp);
5590 if (sdebug_allow_restart)
5591 sdp->allow_restart = 1;
5593 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
5594 sdebug_debugfs_root);
5595 if (IS_ERR_OR_NULL(devip->debugfs_entry))
5596 pr_info("%s: failed to create debugfs directory for device %s\n",
5597 __func__, dev_name(&sdp->sdev_gendev));
5599 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
5600 &sdebug_error_fops);
5601 if (IS_ERR_OR_NULL(dentry))
5602 pr_info("%s: failed to create error file for device %s\n",
5603 __func__, dev_name(&sdp->sdev_gendev));
5608 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5610 struct sdebug_dev_info *devip =
5611 (struct sdebug_dev_info *)sdp->hostdata;
5612 struct sdebug_err_inject *err;
5615 pr_info("slave_destroy <%u %u %u %llu>\n",
5616 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5621 spin_lock(&devip->list_lock);
5622 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5623 list_del_rcu(&err->list);
5624 call_rcu(&err->rcu, sdebug_err_free);
5626 spin_unlock(&devip->list_lock);
5628 debugfs_remove(devip->debugfs_entry);
5630 /* make this slot available for re-use */
5631 devip->used = false;
5632 sdp->hostdata = NULL;
5635 /* Returns true if we require the queued memory to be freed by the caller. */
5636 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5637 enum sdeb_defer_type defer_t)
5639 if (defer_t == SDEB_DEFER_HRT) {
5640 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5643 case 0: /* Not active, it must have already run */
5644 case -1: /* It's executing the callback */
5646 case 1: /* Was active, we've now cancelled */
5650 } else if (defer_t == SDEB_DEFER_WQ) {
5651 /* Cancel if pending */
5652 if (cancel_work_sync(&sd_dp->ew.work))
5654 /* Was not pending, so it must have run */
5656 } else if (defer_t == SDEB_DEFER_POLL) {
5664 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5666 enum sdeb_defer_type l_defer_t;
5667 struct sdebug_defer *sd_dp;
5668 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5669 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5671 lockdep_assert_held(&sdsc->lock);
5675 sd_dp = &sqcp->sd_dp;
5676 l_defer_t = READ_ONCE(sd_dp->defer_t);
5677 ASSIGN_QUEUED_CMD(cmnd, NULL);
5679 if (stop_qc_helper(sd_dp, l_defer_t))
5680 sdebug_free_queued_cmd(sqcp);
5686 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5688 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5690 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5691 unsigned long flags;
5694 spin_lock_irqsave(&sdsc->lock, flags);
5695 res = scsi_debug_stop_cmnd(cmnd);
5696 spin_unlock_irqrestore(&sdsc->lock, flags);
5702 * All we can do is set the cmnd as internally aborted and wait for it to
5703 * finish. We cannot call scsi_done() as normal completion path may do that.
5705 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5707 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5712 /* Deletes (stops) timers or work queues of all queued commands */
5713 static void stop_all_queued(void)
5715 struct sdebug_host_info *sdhp;
5717 mutex_lock(&sdebug_host_list_mutex);
5718 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5719 struct Scsi_Host *shost = sdhp->shost;
5721 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5723 mutex_unlock(&sdebug_host_list_mutex);
5726 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5728 struct scsi_device *sdp = cmnd->device;
5729 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5730 struct sdebug_err_inject *err;
5731 unsigned char *cmd = cmnd->cmnd;
5738 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5739 if (err->type == ERR_ABORT_CMD_FAILED &&
5740 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5754 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5756 bool ok = scsi_debug_abort_cmnd(SCpnt);
5757 u8 *cmd = SCpnt->cmnd;
5762 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5763 sdev_printk(KERN_INFO, SCpnt->device,
5764 "%s: command%s found\n", __func__,
5767 if (sdebug_fail_abort(SCpnt)) {
5768 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5776 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5778 struct scsi_device *sdp = data;
5779 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5781 if (scmd->device == sdp)
5782 scsi_debug_abort_cmnd(scmd);
5787 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5788 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5790 struct Scsi_Host *shost = sdp->host;
5792 blk_mq_tagset_busy_iter(&shost->tag_set,
5793 scsi_debug_stop_all_queued_iter, sdp);
5796 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5798 struct scsi_device *sdp = cmnd->device;
5799 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5800 struct sdebug_err_inject *err;
5801 unsigned char *cmd = cmnd->cmnd;
5808 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5809 if (err->type == ERR_LUN_RESET_FAILED &&
5810 (err->cmd == cmd[0] || err->cmd == 0xff)) {
5824 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5826 struct scsi_device *sdp = SCpnt->device;
5827 struct sdebug_dev_info *devip = sdp->hostdata;
5828 u8 *cmd = SCpnt->cmnd;
5833 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5834 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5836 scsi_debug_stop_all_queued(sdp);
5838 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5840 if (sdebug_fail_lun_reset(SCpnt)) {
5841 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
5848 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5850 struct scsi_target *starget = scsi_target(cmnd->device);
5851 struct sdebug_target_info *targetip =
5852 (struct sdebug_target_info *)starget->hostdata;
5855 return targetip->reset_fail;
5860 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5862 struct scsi_device *sdp = SCpnt->device;
5863 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5864 struct sdebug_dev_info *devip;
5865 u8 *cmd = SCpnt->cmnd;
5869 ++num_target_resets;
5870 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5871 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5873 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5874 if (devip->target == sdp->id) {
5875 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5880 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5881 sdev_printk(KERN_INFO, sdp,
5882 "%s: %d device(s) found in target\n", __func__, k);
5884 if (sdebug_fail_target_reset(SCpnt)) {
5885 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
5893 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5895 struct scsi_device *sdp = SCpnt->device;
5896 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5897 struct sdebug_dev_info *devip;
5902 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5903 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5905 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5906 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5910 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5911 sdev_printk(KERN_INFO, sdp,
5912 "%s: %d device(s) found in host\n", __func__, k);
5916 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5918 struct sdebug_host_info *sdbg_host;
5919 struct sdebug_dev_info *devip;
5923 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5924 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5925 mutex_lock(&sdebug_host_list_mutex);
5926 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5927 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5929 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5933 mutex_unlock(&sdebug_host_list_mutex);
5935 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5936 sdev_printk(KERN_INFO, SCpnt->device,
5937 "%s: %d device(s) found\n", __func__, k);
5941 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5943 struct msdos_partition *pp;
5944 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5945 int sectors_per_part, num_sectors, k;
5946 int heads_by_sects, start_sec, end_sec;
5948 /* assume partition table already zeroed */
5949 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5951 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5952 sdebug_num_parts = SDEBUG_MAX_PARTS;
5953 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5955 num_sectors = (int)get_sdebug_capacity();
5956 sectors_per_part = (num_sectors - sdebug_sectors_per)
5958 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5959 starts[0] = sdebug_sectors_per;
5960 max_part_secs = sectors_per_part;
5961 for (k = 1; k < sdebug_num_parts; ++k) {
5962 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5964 if (starts[k] - starts[k - 1] < max_part_secs)
5965 max_part_secs = starts[k] - starts[k - 1];
5967 starts[sdebug_num_parts] = num_sectors;
5968 starts[sdebug_num_parts + 1] = 0;
5970 ramp[510] = 0x55; /* magic partition markings */
5972 pp = (struct msdos_partition *)(ramp + 0x1be);
5973 for (k = 0; starts[k + 1]; ++k, ++pp) {
5974 start_sec = starts[k];
5975 end_sec = starts[k] + max_part_secs - 1;
5978 pp->cyl = start_sec / heads_by_sects;
5979 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5980 / sdebug_sectors_per;
5981 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5983 pp->end_cyl = end_sec / heads_by_sects;
5984 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5985 / sdebug_sectors_per;
5986 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5988 pp->start_sect = cpu_to_le32(start_sec);
5989 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5990 pp->sys_ind = 0x83; /* plain Linux partition */
5994 static void block_unblock_all_queues(bool block)
5996 struct sdebug_host_info *sdhp;
5998 lockdep_assert_held(&sdebug_host_list_mutex);
6000 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6001 struct Scsi_Host *shost = sdhp->shost;
6004 scsi_block_requests(shost);
6006 scsi_unblock_requests(shost);
6010 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6011 * commands will be processed normally before triggers occur.
6013 static void tweak_cmnd_count(void)
6017 modulo = abs(sdebug_every_nth);
6021 mutex_lock(&sdebug_host_list_mutex);
6022 block_unblock_all_queues(true);
6023 count = atomic_read(&sdebug_cmnd_count);
6024 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6025 block_unblock_all_queues(false);
6026 mutex_unlock(&sdebug_host_list_mutex);
6029 static void clear_queue_stats(void)
6031 atomic_set(&sdebug_cmnd_count, 0);
6032 atomic_set(&sdebug_completions, 0);
6033 atomic_set(&sdebug_miss_cpus, 0);
6034 atomic_set(&sdebug_a_tsf, 0);
6037 static bool inject_on_this_cmd(void)
6039 if (sdebug_every_nth == 0)
6041 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6044 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
6047 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6050 kmem_cache_free(queued_cmd_cache, sqcp);
6053 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
6055 struct sdebug_queued_cmd *sqcp;
6056 struct sdebug_defer *sd_dp;
6058 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
6062 sd_dp = &sqcp->sd_dp;
6064 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6065 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
6066 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
6073 /* Complete the processing of the thread that queued a SCSI command to this
6074 * driver. It either completes the command by calling scsi_done() or
6075 * schedules an hrtimer or work queue and then returns 0. Returns
6076 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
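 *
 * Dispatch summary: a zero delay answers in the caller's thread; a
 * positive delta_jiff or ndelay arms a pinned hrtimer (optionally
 * randomized below the requested delay); a negative jdelay falls through
 * to a work queue; REQ_POLLED requests skip both and are parked as
 * SDEB_DEFER_POLL for the poll path to complete.
 */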
6078 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
6080 int (*pfp)(struct scsi_cmnd *,
6081 struct sdebug_dev_info *),
6082 int delta_jiff, int ndelay)
6084 struct request *rq = scsi_cmd_to_rq(cmnd);
6085 bool polled = rq->cmd_flags & REQ_POLLED;
6086 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6087 unsigned long flags;
6088 u64 ns_from_boot = 0;
6089 struct sdebug_queued_cmd *sqcp;
6090 struct scsi_device *sdp;
6091 struct sdebug_defer *sd_dp;
6093 if (unlikely(devip == NULL)) {
6094 if (scsi_result == 0)
6095 scsi_result = DID_NO_CONNECT << 16;
6096 goto respond_in_thread;
6100 if (delta_jiff == 0)
6101 goto respond_in_thread;
6104 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
6105 (scsi_result == 0))) {
6106 int num_in_q = scsi_device_busy(sdp);
6107 int qdepth = cmnd->device->queue_depth;
6109 if ((num_in_q == qdepth) &&
6110 (atomic_inc_return(&sdebug_a_tsf) >=
6111 abs(sdebug_every_nth))) {
6112 atomic_set(&sdebug_a_tsf, 0);
6113 scsi_result = device_qfull_result;
6115 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
6116 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
6117 __func__, num_in_q);
6121 sqcp = sdebug_alloc_queued_cmd(cmnd);
6123 pr_err("%s no alloc\n", __func__);
6124 return SCSI_MLQUEUE_HOST_BUSY;
6126 sd_dp = &sqcp->sd_dp;
6129 ns_from_boot = ktime_get_boottime_ns();
6131 /* one of the resp_*() response functions is called here */
6132 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
6133 if (cmnd->result & SDEG_RES_IMMED_MASK) {
6134 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6135 delta_jiff = ndelay = 0;
6137 if (cmnd->result == 0 && scsi_result != 0)
6138 cmnd->result = scsi_result;
6139 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
6140 if (atomic_read(&sdeb_inject_pending)) {
6141 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
6142 atomic_set(&sdeb_inject_pending, 0);
6143 cmnd->result = check_condition_result;
6147 if (unlikely(sdebug_verbose && cmnd->result))
6148 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
6149 __func__, cmnd->result);
6151 if (delta_jiff > 0 || ndelay > 0) {
6154 if (delta_jiff > 0) {
6155 u64 ns = jiffies_to_nsecs(delta_jiff);
6157 if (sdebug_random && ns < U32_MAX) {
6158 ns = get_random_u32_below((u32)ns);
6159 } else if (sdebug_random) {
6160 ns >>= 12; /* scale to 4 usec precision */
6161 if (ns < U32_MAX) /* over 4 hours max */
6162 ns = get_random_u32_below((u32)ns);
6165 kt = ns_to_ktime(ns);
6166 } else { /* ndelay has a 4.2 second max */
6167 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
6169 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
6170 u64 d = ktime_get_boottime_ns() - ns_from_boot;
6172 if (kt <= d) { /* elapsed duration >= kt */
6173 /* call scsi_done() from this thread */
6174 sdebug_free_queued_cmd(sqcp);
6178 /* otherwise reduce kt by elapsed time */
6182 if (sdebug_statistics)
6183 sd_dp->issuing_cpu = raw_smp_processor_id();
6185 spin_lock_irqsave(&sdsc->lock, flags);
6186 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
6187 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6188 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6189 spin_unlock_irqrestore(&sdsc->lock, flags);
6191 /* schedule the invocation of scsi_done() for a later time */
6192 spin_lock_irqsave(&sdsc->lock, flags);
6193 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6194 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
6195 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
6197 * The completion handler will try to grab sqcp->lock,
6198 * so there is no chance that the completion handler
6199 * will call scsi_done() until we release the lock
6200 * here (so ok to keep referencing sdsc).
6202 spin_unlock_irqrestore(&sdsc->lock, flags);
6204 } else { /* jdelay < 0, use work queue */
6205 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
6206 atomic_read(&sdeb_inject_pending))) {
6207 sd_dp->aborted = true;
6208 atomic_set(&sdeb_inject_pending, 0);
6209 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
6210 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
6213 if (sdebug_statistics)
6214 sd_dp->issuing_cpu = raw_smp_processor_id();
6216 spin_lock_irqsave(&sdsc->lock, flags);
6217 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6218 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
6219 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
6220 spin_unlock_irqrestore(&sdsc->lock, flags);
6222 spin_lock_irqsave(&sdsc->lock, flags);
6223 ASSIGN_QUEUED_CMD(cmnd, sqcp);
6224 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
6225 schedule_work(&sd_dp->ew.work);
6226 spin_unlock_irqrestore(&sdsc->lock, flags);
6232 respond_in_thread: /* call back to mid-layer using invocation thread */
6233 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
6234 cmnd->result &= ~SDEG_RES_IMMED_MASK;
6235 if (cmnd->result == 0 && scsi_result != 0)
6236 cmnd->result = scsi_result;
6241 /* Note: The following macros create attribute files in the
6242 /sys/module/scsi_debug/parameters directory. Unfortunately this
6243 driver is unaware of changes made through those files and cannot
6244 trigger auxiliary actions as it can when the corresponding attribute
6245 in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
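/*
 * Illustrative load line only (parameter values are arbitrary examples):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 sector_size=4096
 */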
6247 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
6248 module_param_named(ato, sdebug_ato, int, S_IRUGO);
6249 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
6250 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
6251 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
6252 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
6253 module_param_named(dif, sdebug_dif, int, S_IRUGO);
6254 module_param_named(dix, sdebug_dix, int, S_IRUGO);
6255 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
6256 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
6257 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
6258 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
6259 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
6260 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
6261 module_param_string(inq_product, sdebug_inq_product_id,
6262 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
6263 module_param_string(inq_rev, sdebug_inq_product_rev,
6264 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
6265 module_param_string(inq_vendor, sdebug_inq_vendor_id,
6266 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
6267 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
6268 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
6269 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
6270 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
6271 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
6272 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
6273 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
6274 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
6275 module_param_named(medium_error_count, sdebug_medium_error_count, int,
6277 module_param_named(medium_error_start, sdebug_medium_error_start, int,
6279 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
6280 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
6281 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
6282 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
6283 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
6284 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
6285 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
6286 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
6287 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
6288 module_param_named(per_host_store, sdebug_per_host_store, bool,
6290 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
6291 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
6292 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
6293 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
6294 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
6295 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
6296 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
6297 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
6298 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
6299 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
6300 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
6301 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
6302 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
6303 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
6304 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
6305 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
6306 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
6307 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
6309 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
6310 module_param_named(write_same_length, sdebug_write_same_length, int,
6312 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
6313 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
6314 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
6315 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
6316 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
6317 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
6319 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
6320 MODULE_DESCRIPTION("SCSI debug adapter driver");
6321 MODULE_LICENSE("GPL");
6322 MODULE_VERSION(SDEBUG_VERSION);
6324 MODULE_PARM_DESC(add_host, "add n hosts; if negative (via sysfs) remove host(s) (def=1)");
6325 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6326 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6327 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6328 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6329 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs (def=8)");
6330 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6331 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6332 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
6333 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
6334 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6335 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6336 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6337 MODULE_PARM_DESC(host_max_queue,
6338 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6339 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6340 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6341 SDEBUG_VERSION "\")");
6342 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6343 MODULE_PARM_DESC(lbprz,
6344 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6345 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6346 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6347 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6348 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6349 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
6350 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
6351 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6352 MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return MEDIUM error (following medium_error_start)");
6353 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6354 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6355 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6356 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6357 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
6358 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
6359 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
6360 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6361 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6362 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6363 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6364 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6365 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
6366 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
6367 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6368 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6369 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
6370 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6371 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6372 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6373 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6374 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6375 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6376 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6377 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6378 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6379 MODULE_PARM_DESC(uuid_ctl,
6380 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6381 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6382 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6383 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6384 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6385 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6386 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6387 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6388 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6389 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6390 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag (def=0)");
6392 #define SDEBUG_INFO_LEN 256
6393 static char sdebug_info[SDEBUG_INFO_LEN];
6395 static const char *scsi_debug_info(struct Scsi_Host *shp)
6399 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
6400 my_name, SDEBUG_VERSION, sdebug_version_date);
6401 if (k >= (SDEBUG_INFO_LEN - 1))
6403 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
6404 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
6405 sdebug_dev_size_mb, sdebug_opts, submit_queues,
6406 "statistics", (int)sdebug_statistics);
6410 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
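/*
 * Illustrative example (assuming the host number is 0):
 *   echo 1 > /proc/scsi/scsi_debug/0
 * sets opts to 1 (SDEBUG_OPT_NOISE), enabling verbose logging. Note the
 * value is parsed with sscanf("%d") and so must be decimal.
 */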
6411 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6416 int minLen = length > 15 ? 15 : length;
6418 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6420 memcpy(arr, buffer, minLen);
6422 if (1 != sscanf(arr, "%d", &opts))
6425 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6426 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6427 if (sdebug_every_nth != 0)
6432 struct sdebug_submit_queue_data {
6438 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
6440 struct sdebug_submit_queue_data *data = opaque;
6441 u32 unique_tag = blk_mq_unique_tag(rq);
6442 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
6443 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
6444 int queue_num = data->queue_num;
6446 if (hwq != queue_num)
6449 /* Rely on iterating in ascending tag order */
6450 if (*data->first == -1)
6451 *data->first = *data->last = tag;
6458 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
6459  * same for each scsi_debug host (if more than one). Some of the counters
6460  * in the output are not atomic so may be inaccurate on a busy system. */
6461 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
6463 struct sdebug_host_info *sdhp;
6466 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6467 SDEBUG_VERSION, sdebug_version_date);
6468 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6469 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6470 sdebug_opts, sdebug_every_nth);
6471 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6472 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6473 sdebug_sector_size, "bytes");
6474 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6475 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6477 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6478 num_dev_resets, num_target_resets, num_bus_resets,
6480 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6481 dix_reads, dix_writes, dif_errors);
6482 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6484 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6485 atomic_read(&sdebug_cmnd_count),
6486 atomic_read(&sdebug_completions),
6487 "miss_cpus", atomic_read(&sdebug_miss_cpus),
6488 atomic_read(&sdebug_a_tsf),
6489 atomic_read(&sdeb_mq_poll_count));
6491 seq_printf(m, "submit_queues=%d\n", submit_queues);
6492 for (j = 0; j < submit_queues; ++j) {
6494 struct sdebug_submit_queue_data data = {
6499 seq_printf(m, " queue %d:\n", j);
6500 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
6503 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
6504 "first,last bits", f, l);
6508 seq_printf(m, "this host_no=%d\n", host->host_no);
6509 if (!xa_empty(per_store_ap)) {
6512 unsigned long l_idx;
6513 struct sdeb_store_info *sip;
6515 seq_puts(m, "\nhost list:\n");
6517 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6519 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
6520 sdhp->shost->host_no, idx);
6523 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6524 sdeb_most_recent_idx);
6526 xa_for_each(per_store_ap, l_idx, sip) {
6527 niu = xa_get_mark(per_store_ap, l_idx,
6528 SDEB_XA_NOT_IN_USE);
6530 seq_printf(m, " %d: idx=%d%s\n", j, idx,
6531 (niu ? " not_in_use" : ""));
6538 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6540 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6542 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6543 * of delay is jiffies.
6545 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6550 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6552 if (sdebug_jdelay != jdelay) {
6553 struct sdebug_host_info *sdhp;
6555 mutex_lock(&sdebug_host_list_mutex);
6556 block_unblock_all_queues(true);
6558 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6559 struct Scsi_Host *shost = sdhp->shost;
6561 if (scsi_host_busy(shost)) {
6562 res = -EBUSY; /* queued commands */
6567 sdebug_jdelay = jdelay;
6570 block_unblock_all_queues(false);
6571 mutex_unlock(&sdebug_host_list_mutex);
6577 static DRIVER_ATTR_RW(delay);
6579 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6581 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6583 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6584 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6585 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6590 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6591 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6593 if (sdebug_ndelay != ndelay) {
6594 struct sdebug_host_info *sdhp;
6596 mutex_lock(&sdebug_host_list_mutex);
6597 block_unblock_all_queues(true);
6599 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6600 struct Scsi_Host *shost = sdhp->shost;
6602 if (scsi_host_busy(shost)) {
6603 res = -EBUSY; /* queued commands */
6609 sdebug_ndelay = ndelay;
6610 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6613 block_unblock_all_queues(false);
6614 mutex_unlock(&sdebug_host_list_mutex);
6620 static DRIVER_ATTR_RW(ndelay);
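/*
 * Illustrative: delay is in jiffies while ndelay is in nanoseconds, e.g.
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * defers each response by two jiffies, whereas
 *   echo 500000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 * gives a 0.5 ms delay and overrides any jiffy-based delay.
 */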
6622 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6624 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6627 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6633 if (sscanf(buf, "%10s", work) == 1) {
6634 if (strncasecmp(work, "0x", 2) == 0) {
6635 if (kstrtoint(work + 2, 16, &opts) == 0)
6638 if (kstrtoint(work, 10, &opts) == 0)
6645 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6646 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6650 static DRIVER_ATTR_RW(opts);
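/*
 * Illustrative: opts is a bit mask accepted in decimal or hex, e.g.
 *   echo 0x5 > /sys/bus/pseudo/drivers/scsi_debug/opts
 * enables noise (0x1) together with injected timeouts (0x4).
 */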
6652 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6654 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6656 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6661 /* Cannot change from or to TYPE_ZBC with sysfs */
6662 if (sdebug_ptype == TYPE_ZBC)
6665 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6673 static DRIVER_ATTR_RW(ptype);
6675 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6677 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6679 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6684 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6690 static DRIVER_ATTR_RW(dsense);
6692 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6694 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6696 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6701 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6702 bool want_store = (n == 0);
6703 struct sdebug_host_info *sdhp;
6706 sdebug_fake_rw = (sdebug_fake_rw > 0);
6707 if (sdebug_fake_rw == n)
6708 return count; /* not transitioning so do nothing */
6710 if (want_store) { /* 1 --> 0 transition, set up store */
6711 if (sdeb_first_idx < 0) {
6712 idx = sdebug_add_store();
6716 idx = sdeb_first_idx;
6717 xa_clear_mark(per_store_ap, idx,
6718 SDEB_XA_NOT_IN_USE);
6720 /* make all hosts use same store */
6721 list_for_each_entry(sdhp, &sdebug_host_list,
6723 if (sdhp->si_idx != idx) {
6724 xa_set_mark(per_store_ap, sdhp->si_idx,
6725 SDEB_XA_NOT_IN_USE);
6729 sdeb_most_recent_idx = idx;
6730 } else { /* 0 --> 1 transition is trigger for shrink */
6731 sdebug_erase_all_stores(true /* apart from first */);
6738 static DRIVER_ATTR_RW(fake_rw);
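/*
 * Illustrative: writing 1 to fake_rw shrinks the backing stores to just
 * the first one (reads and writes are then only pretended); writing 0
 * back makes every host share that single remaining store again.
 */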
6740 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6742 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6744 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6749 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6750 sdebug_no_lun_0 = n;
6755 static DRIVER_ATTR_RW(no_lun_0);
6757 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6759 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6761 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6766 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6767 sdebug_num_tgts = n;
6768 sdebug_max_tgts_luns();
6773 static DRIVER_ATTR_RW(num_tgts);
6775 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6777 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6779 static DRIVER_ATTR_RO(dev_size_mb);
6781 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6783 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6786 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6791 if (kstrtobool(buf, &v))
6794 sdebug_per_host_store = v;
6797 static DRIVER_ATTR_RW(per_host_store);
6799 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6801 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6803 static DRIVER_ATTR_RO(num_parts);
6805 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6807 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6809 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6815 if (sscanf(buf, "%10s", work) == 1) {
6816 if (strncasecmp(work, "0x", 2) == 0) {
6817 if (kstrtoint(work + 2, 16, &nth) == 0)
6818 goto every_nth_done;
6820 if (kstrtoint(work, 10, &nth) == 0)
6821 goto every_nth_done;
6827 sdebug_every_nth = nth;
6828 if (nth && !sdebug_statistics) {
6829 pr_info("every_nth needs statistics=1, set it\n");
6830 sdebug_statistics = true;
6835 static DRIVER_ATTR_RW(every_nth);
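/*
 * Illustrative: echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * combined with opts=4 makes every 100th command time out; statistics
 * are switched on automatically, as noted above.
 */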
6837 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6839 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6841 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6847 if (kstrtoint(buf, 0, &n))
6850 if (n > (int)SAM_LUN_AM_FLAT) {
6851 pr_warn("only LUN address methods 0 and 1 are supported\n");
6854 changed = ((int)sdebug_lun_am != n);
6856 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6857 struct sdebug_host_info *sdhp;
6858 struct sdebug_dev_info *dp;
6860 mutex_lock(&sdebug_host_list_mutex);
6861 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6862 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6863 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6866 mutex_unlock(&sdebug_host_list_mutex);
6872 static DRIVER_ATTR_RW(lun_format);
6874 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6876 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6878 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6884 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6886 pr_warn("max_luns can be no more than 256\n");
6889 changed = (sdebug_max_luns != n);
6890 sdebug_max_luns = n;
6891 sdebug_max_tgts_luns();
6892 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6893 struct sdebug_host_info *sdhp;
6894 struct sdebug_dev_info *dp;
6896 mutex_lock(&sdebug_host_list_mutex);
6897 list_for_each_entry(sdhp, &sdebug_host_list,
6899 list_for_each_entry(dp, &sdhp->dev_info_list,
6901 set_bit(SDEBUG_UA_LUNS_CHANGED,
6905 mutex_unlock(&sdebug_host_list_mutex);
6911 static DRIVER_ATTR_RW(max_luns);
6913 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6915 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6917 /* N.B. max_queue can be changed while there are queued commands. In-flight
6918  * commands beyond the new max_queue will be completed. */
6919 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6924 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6925 (n <= SDEBUG_CANQUEUE) &&
6926 (sdebug_host_max_queue == 0)) {
6927 mutex_lock(&sdebug_host_list_mutex);
6929 /* We may only change sdebug_max_queue when we have no shosts */
6930 if (list_empty(&sdebug_host_list))
6931 sdebug_max_queue = n;
6934 mutex_unlock(&sdebug_host_list_mutex);
6939 static DRIVER_ATTR_RW(max_queue);
6941 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6943 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6946 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6948 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6951 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6955 if (kstrtobool(buf, &v))
6958 sdebug_no_rwlock = v;
6961 static DRIVER_ATTR_RW(no_rwlock);
6964 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6965 * in range [0, sdebug_host_max_queue), we can't change it.
6967 static DRIVER_ATTR_RO(host_max_queue);
6969 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6971 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6973 static DRIVER_ATTR_RO(no_uld);
6975 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6977 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6979 static DRIVER_ATTR_RO(scsi_level);
6981 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6983 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6985 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6991 /* Ignore capacity change for ZBC drives for now */
6992 if (sdeb_zbc_in_use)
6995 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6996 changed = (sdebug_virtual_gb != n);
6997 sdebug_virtual_gb = n;
6998 sdebug_capacity = get_sdebug_capacity();
7000 struct sdebug_host_info *sdhp;
7001 struct sdebug_dev_info *dp;
7003 mutex_lock(&sdebug_host_list_mutex);
7004 list_for_each_entry(sdhp, &sdebug_host_list,
7006 list_for_each_entry(dp, &sdhp->dev_info_list,
7008 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
7012 mutex_unlock(&sdebug_host_list_mutex);
7018 static DRIVER_ATTR_RW(virtual_gb);
7020 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7022 /* absolute number of hosts currently active is what is shown */
7023 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7026 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
7031 struct sdeb_store_info *sip;
7032 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
7035 if (sscanf(buf, "%d", &delta_hosts) != 1)
7037 if (delta_hosts > 0) {
7041 xa_for_each_marked(per_store_ap, idx, sip,
7042 SDEB_XA_NOT_IN_USE) {
7043 sdeb_most_recent_idx = (int)idx;
7047 if (found) /* re-use case */
7048 sdebug_add_host_helper((int)idx);
7050 sdebug_do_add_host(true);
7052 sdebug_do_add_host(false);
7054 } while (--delta_hosts);
7055 } else if (delta_hosts < 0) {
7057 sdebug_do_remove_host(false);
7058 } while (++delta_hosts);
7062 static DRIVER_ATTR_RW(add_host);
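/*
 * Illustrative: echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * adds two more pseudo hosts (re-using an idle store when one exists),
 * while echo -1 > add_host removes the most recently added host.
 */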
7064 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7066 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7068 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7073 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7074 sdebug_vpd_use_hostno = n;
7079 static DRIVER_ATTR_RW(vpd_use_hostno);
7081 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7083 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7085 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7090 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7092 sdebug_statistics = true;
7094 clear_queue_stats();
7095 sdebug_statistics = false;
7101 static DRIVER_ATTR_RW(statistics);
7103 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7105 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7107 static DRIVER_ATTR_RO(sector_size);
7109 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7111 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7113 static DRIVER_ATTR_RO(submit_queues);
7115 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7117 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7119 static DRIVER_ATTR_RO(dix);
7121 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7123 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7125 static DRIVER_ATTR_RO(dif);
7127 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7129 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7131 static DRIVER_ATTR_RO(guard);
7133 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7135 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7137 static DRIVER_ATTR_RO(ato);
7139 static ssize_t map_show(struct device_driver *ddp, char *buf)
7143 if (!scsi_debug_lbp())
7144 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
7145 sdebug_store_sectors);
7147 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
7148 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
7151 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
7152 (int)map_size, sip->map_storep);
7154 buf[count++] = '\n';
7159 static DRIVER_ATTR_RO(map);
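/*
 * Illustrative 'cat map' output with LBP enabled (actual ranges depend
 * on what has been written), in bitmap-list form:
 *   0-1,64-127,2048-2175
 * i.e. the currently mapped (provisioned) regions of the store.
 */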
7161 static ssize_t random_show(struct device_driver *ddp, char *buf)
7163 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7166 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7171 if (kstrtobool(buf, &v))
7177 static DRIVER_ATTR_RW(random);
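/*
 * Illustrative: with random=1 and, say, ndelay=1000000, each command
 * completes after a uniformly distributed delay in [0, 1 ms).
 */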
7179 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7181 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7183 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7188 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7189 sdebug_removable = (n > 0);
7194 static DRIVER_ATTR_RW(removable);
7196 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7198 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7200 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7201 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7206 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7207 sdebug_host_lock = (n > 0);
7212 static DRIVER_ATTR_RW(host_lock);
7214 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7216 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7218 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7223 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7224 sdebug_strict = (n > 0);
7229 static DRIVER_ATTR_RW(strict);
7231 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7233 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7235 static DRIVER_ATTR_RO(uuid_ctl);
7237 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7239 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7241 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7246 ret = kstrtoint(buf, 0, &n);
7250 all_config_cdb_len();
7253 static DRIVER_ATTR_RW(cdb_len);
7255 static const char * const zbc_model_strs_a[] = {
7256 [BLK_ZONED_NONE] = "none",
7257 [BLK_ZONED_HA] = "host-aware",
7258 [BLK_ZONED_HM] = "host-managed",
7261 static const char * const zbc_model_strs_b[] = {
7262 [BLK_ZONED_NONE] = "no",
7263 [BLK_ZONED_HA] = "aware",
7264 [BLK_ZONED_HM] = "managed",
7267 static const char * const zbc_model_strs_c[] = {
7268 [BLK_ZONED_NONE] = "0",
7269 [BLK_ZONED_HA] = "1",
7270 [BLK_ZONED_HM] = "2",
7273 static int sdeb_zbc_model_str(const char *cp)
7275 int res = sysfs_match_string(zbc_model_strs_a, cp);
7278 res = sysfs_match_string(zbc_model_strs_b, cp);
7280 res = sysfs_match_string(zbc_model_strs_c, cp);
7288 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7290 return scnprintf(buf, PAGE_SIZE, "%s\n",
7291 zbc_model_strs_a[sdeb_zbc_model]);
7293 static DRIVER_ATTR_RO(zbc);
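/*
 * Illustrative: the zbc parameter accepts any of the spellings matched
 * above, so e.g.
 *   modprobe scsi_debug zbc=host-managed
 *   modprobe scsi_debug zbc=managed
 *   modprobe scsi_debug zbc=2
 * all select the BLK_ZONED_HM model.
 */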
7295 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7297 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7299 static DRIVER_ATTR_RO(tur_ms_to_ready);
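/*
 * Illustrative: loading with tur_ms_to_ready=2000 makes TEST UNIT READY
 * (and media access commands) report "becoming ready" for roughly the
 * first two seconds after device creation; see resp_not_ready() below.
 */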
7301 /* Note: The following array creates attribute files in the
7302 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7303 files (over those found in the /sys/module/scsi_debug/parameters
7304 directory) is that auxiliary actions can be triggered when an attribute
7305 is changed. For example see: add_host_store() above.
7308 static struct attribute *sdebug_drv_attrs[] = {
7309 &driver_attr_delay.attr,
7310 &driver_attr_opts.attr,
7311 &driver_attr_ptype.attr,
7312 &driver_attr_dsense.attr,
7313 &driver_attr_fake_rw.attr,
7314 &driver_attr_host_max_queue.attr,
7315 &driver_attr_no_lun_0.attr,
7316 &driver_attr_num_tgts.attr,
7317 &driver_attr_dev_size_mb.attr,
7318 &driver_attr_num_parts.attr,
7319 &driver_attr_every_nth.attr,
7320 &driver_attr_lun_format.attr,
7321 &driver_attr_max_luns.attr,
7322 &driver_attr_max_queue.attr,
7323 &driver_attr_no_rwlock.attr,
7324 &driver_attr_no_uld.attr,
7325 &driver_attr_scsi_level.attr,
7326 &driver_attr_virtual_gb.attr,
7327 &driver_attr_add_host.attr,
7328 &driver_attr_per_host_store.attr,
7329 &driver_attr_vpd_use_hostno.attr,
7330 &driver_attr_sector_size.attr,
7331 &driver_attr_statistics.attr,
7332 &driver_attr_submit_queues.attr,
7333 &driver_attr_dix.attr,
7334 &driver_attr_dif.attr,
7335 &driver_attr_guard.attr,
7336 &driver_attr_ato.attr,
7337 &driver_attr_map.attr,
7338 &driver_attr_random.attr,
7339 &driver_attr_removable.attr,
7340 &driver_attr_host_lock.attr,
7341 &driver_attr_ndelay.attr,
7342 &driver_attr_strict.attr,
7343 &driver_attr_uuid_ctl.attr,
7344 &driver_attr_cdb_len.attr,
7345 &driver_attr_tur_ms_to_ready.attr,
7346 &driver_attr_zbc.attr,
7349 ATTRIBUTE_GROUPS(sdebug_drv);
7351 static struct device *pseudo_primary;
7353 static int __init scsi_debug_init(void)
7355 bool want_store = (sdebug_fake_rw == 0);
7357 int k, ret, hosts_to_add;
7360 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
7361 pr_warn("ndelay must be less than 1 second, ignored\n");
7363 } else if (sdebug_ndelay > 0)
7364 sdebug_jdelay = JDELAY_OVERRIDDEN;
7366 switch (sdebug_sector_size) {
7373 pr_err("invalid sector_size %d\n", sdebug_sector_size);
7377 switch (sdebug_dif) {
7378 case T10_PI_TYPE0_PROTECTION:
7380 case T10_PI_TYPE1_PROTECTION:
7381 case T10_PI_TYPE2_PROTECTION:
7382 case T10_PI_TYPE3_PROTECTION:
7383 have_dif_prot = true;
7387 pr_err("dif must be 0, 1, 2 or 3\n");
7391 if (sdebug_num_tgts < 0) {
7392 pr_err("num_tgts must be >= 0\n");
7396 if (sdebug_guard > 1) {
7397 pr_err("guard must be 0 or 1\n");
7401 if (sdebug_ato > 1) {
7402 pr_err("ato must be 0 or 1\n");
7406 if (sdebug_physblk_exp > 15) {
7407 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
7411 sdebug_lun_am = sdebug_lun_am_i;
7412 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
7413 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
7414 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
7417 if (sdebug_max_luns > 256) {
7418 if (sdebug_max_luns > 16384) {
7419 pr_warn("max_luns can be no more than 16384, use default\n");
7420 sdebug_max_luns = DEF_MAX_LUNS;
7422 sdebug_lun_am = SAM_LUN_AM_FLAT;
7425 if (sdebug_lowest_aligned > 0x3fff) {
7426 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
7430 if (submit_queues < 1) {
7431 pr_err("submit_queues must be 1 or more\n");
7435 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
7436 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
7440 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
7441 (sdebug_host_max_queue < 0)) {
7442 pr_err("host_max_queue must be in range [0 %d]\n",
7447 if (sdebug_host_max_queue &&
7448 (sdebug_max_queue != sdebug_host_max_queue)) {
7449 sdebug_max_queue = sdebug_host_max_queue;
7450 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
7455 * check for host managed zoned block device specified with
7456 * ptype=0x14 or zbc=XXX.
7458 if (sdebug_ptype == TYPE_ZBC) {
7459 sdeb_zbc_model = BLK_ZONED_HM;
7460 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7461 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7465 switch (sdeb_zbc_model) {
7466 case BLK_ZONED_NONE:
7468 sdebug_ptype = TYPE_DISK;
7471 sdebug_ptype = TYPE_ZBC;
7474 pr_err("Invalid ZBC model\n");
7478 if (sdeb_zbc_model != BLK_ZONED_NONE) {
7479 sdeb_zbc_in_use = true;
7480 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7481 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7484 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7485 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7486 if (sdebug_dev_size_mb < 1)
7487 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
7488 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7489 sdebug_store_sectors = sz / sdebug_sector_size;
7490 sdebug_capacity = get_sdebug_capacity();
7492 /* play around with geometry, don't waste too much on track 0 */
7494 sdebug_sectors_per = 32;
7495 if (sdebug_dev_size_mb >= 256)
7497 else if (sdebug_dev_size_mb >= 16)
7499 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7500 (sdebug_sectors_per * sdebug_heads);
7501 if (sdebug_cylinders_per >= 1024) {
7502 /* other LLDs do this; implies >= 1GB ram disk ... */
7504 sdebug_sectors_per = 63;
7505 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7506 (sdebug_sectors_per * sdebug_heads);
7508 if (scsi_debug_lbp()) {
7509 sdebug_unmap_max_blocks =
7510 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7512 sdebug_unmap_max_desc =
7513 clamp(sdebug_unmap_max_desc, 0U, 256U);
7515 sdebug_unmap_granularity =
7516 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7518 if (sdebug_unmap_alignment &&
7519 sdebug_unmap_granularity <=
7520 sdebug_unmap_alignment) {
7521 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7525 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7527 idx = sdebug_add_store();
7532 pseudo_primary = root_device_register("pseudo_0");
7533 if (IS_ERR(pseudo_primary)) {
7534 pr_warn("root_device_register() error\n");
7535 ret = PTR_ERR(pseudo_primary);
7538 ret = bus_register(&pseudo_lld_bus);
7540 pr_warn("bus_register error: %d\n", ret);
7543 ret = driver_register(&sdebug_driverfs_driver);
7545 pr_warn("driver_register error: %d\n", ret);
7549 hosts_to_add = sdebug_add_host;
7550 sdebug_add_host = 0;
7552 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7553 if (!queued_cmd_cache) {
7558 sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
7559 if (IS_ERR_OR_NULL(sdebug_debugfs_root))
7560 pr_info("%s: failed to create initial debugfs directory\n", __func__);
7562 for (k = 0; k < hosts_to_add; k++) {
7563 if (want_store && k == 0) {
7564 ret = sdebug_add_host_helper(idx);
7566 pr_err("add_host_helper k=%d, error=%d\n",
7571 ret = sdebug_do_add_host(want_store &&
7572 sdebug_per_host_store);
7574 pr_err("add_host k=%d error=%d\n", k, -ret);
7580 pr_info("built %d host(s)\n", sdebug_num_hosts);
7585 driver_unregister(&sdebug_driverfs_driver);
7587 bus_unregister(&pseudo_lld_bus);
7589 root_device_unregister(pseudo_primary);
7591 sdebug_erase_store(idx, NULL);
7595 static void __exit scsi_debug_exit(void)
7597 int k = sdebug_num_hosts;
7600 sdebug_do_remove_host(true);
7601 kmem_cache_destroy(queued_cmd_cache);
7602 driver_unregister(&sdebug_driverfs_driver);
7603 bus_unregister(&pseudo_lld_bus);
7604 root_device_unregister(pseudo_primary);
7606 sdebug_erase_all_stores(false);
7607 xa_destroy(per_store_ap);
7608 debugfs_remove(sdebug_debugfs_root);
7611 device_initcall(scsi_debug_init);
7612 module_exit(scsi_debug_exit);
7614 static void sdebug_release_adapter(struct device *dev)
7616 struct sdebug_host_info *sdbg_host;
7618 sdbg_host = dev_to_sdebug_host(dev);
7622 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7623 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7628 if (xa_empty(per_store_ap))
7630 sip = xa_load(per_store_ap, idx);
7634 vfree(sip->map_storep);
7635 vfree(sip->dif_storep);
7637 xa_erase(per_store_ap, idx);
7641 /* Assume apart_from_first==false only in shutdown case. */
7642 static void sdebug_erase_all_stores(bool apart_from_first)
7645 struct sdeb_store_info *sip = NULL;
7647 xa_for_each(per_store_ap, idx, sip) {
7648 if (apart_from_first)
7649 apart_from_first = false;
7651 sdebug_erase_store(idx, sip);
7653 if (apart_from_first)
7654 sdeb_most_recent_idx = sdeb_first_idx;
7658 * Returns store xarray new element index (idx) if >=0 else negated errno.
7659 * Limit the number of stores to 65536.
7661 static int sdebug_add_store(void)
7665 unsigned long iflags;
7666 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7667 struct sdeb_store_info *sip = NULL;
7668 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7670 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7674 xa_lock_irqsave(per_store_ap, iflags);
7675 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7676 if (unlikely(res < 0)) {
7677 xa_unlock_irqrestore(per_store_ap, iflags);
7679 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7682 sdeb_most_recent_idx = n_idx;
7683 if (sdeb_first_idx < 0)
7684 sdeb_first_idx = n_idx;
7685 xa_unlock_irqrestore(per_store_ap, iflags);
7688 sip->storep = vzalloc(sz);
7690 pr_err("user data oom\n");
7693 if (sdebug_num_parts > 0)
7694 sdebug_build_parts(sip->storep, sz);
7696 /* DIF/DIX: what T10 calls Protection Information (PI) */
7700 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7701 sip->dif_storep = vmalloc(dif_size);
7703 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7706 if (!sip->dif_storep) {
7707 pr_err("DIX oom\n");
7710 memset(sip->dif_storep, 0xff, dif_size);
7712 /* Logical Block Provisioning */
7713 if (scsi_debug_lbp()) {
7714 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7715 sip->map_storep = vmalloc(array_size(sizeof(long),
7716 BITS_TO_LONGS(map_size)));
7718 pr_info("%lu provisioning blocks\n", map_size);
7720 if (!sip->map_storep) {
7721 pr_err("LBP map oom\n");
7725 bitmap_zero(sip->map_storep, map_size);
7727 /* Map first 1KB for partition table */
7728 if (sdebug_num_parts)
7729 map_region(sip, 0, 2);
7732 rwlock_init(&sip->macc_lck);
7735 sdebug_erase_store((int)n_idx, sip);
7736 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7740 static int sdebug_add_host_helper(int per_host_idx)
7742 int k, devs_per_host, idx;
7743 int error = -ENOMEM;
7744 struct sdebug_host_info *sdbg_host;
7745 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7747 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7750 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7751 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7752 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7753 sdbg_host->si_idx = idx;
7755 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7757 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7758 for (k = 0; k < devs_per_host; k++) {
7759 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7764 mutex_lock(&sdebug_host_list_mutex);
7765 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7766 mutex_unlock(&sdebug_host_list_mutex);
7768 sdbg_host->dev.bus = &pseudo_lld_bus;
7769 sdbg_host->dev.parent = pseudo_primary;
7770 sdbg_host->dev.release = &sdebug_release_adapter;
7771 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7773 error = device_register(&sdbg_host->dev);
7775 mutex_lock(&sdebug_host_list_mutex);
7776 list_del(&sdbg_host->host_list);
7777 mutex_unlock(&sdebug_host_list_mutex);
7785 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7787 list_del(&sdbg_devinfo->dev_list);
7788 kfree(sdbg_devinfo->zstate);
7789 kfree(sdbg_devinfo);
7791 if (sdbg_host->dev.release)
7792 put_device(&sdbg_host->dev);
7795 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7799 static int sdebug_do_add_host(bool mk_new_store)
7801 int ph_idx = sdeb_most_recent_idx;
7804 ph_idx = sdebug_add_store();
7808 return sdebug_add_host_helper(ph_idx);
7811 static void sdebug_do_remove_host(bool the_end)
7814 struct sdebug_host_info *sdbg_host = NULL;
7815 struct sdebug_host_info *sdbg_host2;
7817 mutex_lock(&sdebug_host_list_mutex);
7818 if (!list_empty(&sdebug_host_list)) {
7819 sdbg_host = list_entry(sdebug_host_list.prev,
7820 struct sdebug_host_info, host_list);
7821 idx = sdbg_host->si_idx;
7823 if (!the_end && idx >= 0) {
7826 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7827 if (sdbg_host2 == sdbg_host)
7829 if (idx == sdbg_host2->si_idx) {
7835 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7836 if (idx == sdeb_most_recent_idx)
7837 --sdeb_most_recent_idx;
7841 list_del(&sdbg_host->host_list);
7842 mutex_unlock(&sdebug_host_list_mutex);
7847 device_unregister(&sdbg_host->dev);
7851 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7853 struct sdebug_dev_info *devip = sdev->hostdata;
7858 mutex_lock(&sdebug_host_list_mutex);
7859 block_unblock_all_queues(true);
7861 if (qdepth > SDEBUG_CANQUEUE) {
7862 qdepth = SDEBUG_CANQUEUE;
7863 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7864 qdepth, SDEBUG_CANQUEUE);
7868 if (qdepth != sdev->queue_depth)
7869 scsi_change_queue_depth(sdev, qdepth);
7871 block_unblock_all_queues(false);
7872 mutex_unlock(&sdebug_host_list_mutex);
7874 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7875 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7877 return sdev->queue_depth;
7880 static bool fake_timeout(struct scsi_cmnd *scp)
7882 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7883 if (sdebug_every_nth < -1)
7884 sdebug_every_nth = -1;
7885 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7886 return true; /* ignore command causing timeout */
7887 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7888 scsi_medium_access_command(scp))
7889 return true; /* time out reads and writes */
7894 /* Response to TUR or media access command when device stopped */
7895 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7899 ktime_t now_ts = ktime_get_boottime();
7900 struct scsi_device *sdp = scp->device;
7902 stopped_state = atomic_read(&devip->stopped);
7903 if (stopped_state == 2) {
7904 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7905 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7906 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7907 /* tur_ms_to_ready timer has expired */
7908 atomic_set(&devip->stopped, 0);
7912 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7914 sdev_printk(KERN_INFO, sdp,
7915 "%s: Not ready: in process of becoming ready\n", my_name);
7916 if (scp->cmnd[0] == TEST_UNIT_READY) {
7917 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7919 if (diff_ns <= tur_nanosecs_to_ready)
7920 diff_ns = tur_nanosecs_to_ready - diff_ns;
7922 diff_ns = tur_nanosecs_to_ready;
7923 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7924 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7925 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7927 return check_condition_result;
7930 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7932 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7934 return check_condition_result;
7937 static void sdebug_map_queues(struct Scsi_Host *shost)
7941 if (shost->nr_hw_queues == 1)
7944 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7945 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7949 if (i == HCTX_TYPE_DEFAULT)
7950 map->nr_queues = submit_queues - poll_queues;
7951 else if (i == HCTX_TYPE_POLL)
7952 map->nr_queues = poll_queues;
7954 if (!map->nr_queues) {
7955 BUG_ON(i == HCTX_TYPE_DEFAULT);
7959 map->queue_offset = qoff;
7960 blk_mq_map_queues(map);
7962 qoff += map->nr_queues;
7966 struct sdebug_blk_mq_poll_data {
7967 unsigned int queue_num;
7972 * We don't handle aborted commands here, but it does not seem possible to have
7973 * aborted polled commands from schedule_resp()
7975 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7977 struct sdebug_blk_mq_poll_data *data = opaque;
7978 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7979 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7980 struct sdebug_defer *sd_dp;
7981 u32 unique_tag = blk_mq_unique_tag(rq);
7982 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7983 struct sdebug_queued_cmd *sqcp;
7984 unsigned long flags;
7985 int queue_num = data->queue_num;
7988 /* We're only interested in one queue for this iteration */
7989 if (hwq != queue_num)
7992 /* Subsequent checks would fail if this failed, but check anyway */
7993 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7996 time = ktime_get_boottime();
7998 spin_lock_irqsave(&sdsc->lock, flags);
7999 sqcp = TO_QUEUED_CMD(cmd);
8001 spin_unlock_irqrestore(&sdsc->lock, flags);
8005 sd_dp = &sqcp->sd_dp;
8006 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8007 spin_unlock_irqrestore(&sdsc->lock, flags);
8011 if (time < sd_dp->cmpl_ts) {
8012 spin_unlock_irqrestore(&sdsc->lock, flags);
8016 ASSIGN_QUEUED_CMD(cmd, NULL);
8017 spin_unlock_irqrestore(&sdsc->lock, flags);
8019 if (sdebug_statistics) {
8020 atomic_inc(&sdebug_completions);
8021 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8022 atomic_inc(&sdebug_miss_cpus);
8025 sdebug_free_queued_cmd(sqcp);
8027 scsi_done(cmd); /* callback to mid level */
8028 (*data->num_entries)++;
8032 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8034 int num_entries = 0;
8035 struct sdebug_blk_mq_poll_data data = {
8036 .queue_num = queue_num,
8037 .num_entries = &num_entries,
8040 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8043 if (num_entries > 0)
8044 atomic_add(num_entries, &sdeb_mq_poll_count);
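/*
 * Illustrative usage sketch: polled completions are exercised by loading
 * with, e.g.
 *   modprobe scsi_debug submit_queues=4 poll_queues=2
 * and issuing I/O through a polling interface (such as io_uring with
 * IORING_SETUP_IOPOLL), so the block layer calls sdebug_blk_mq_poll()
 * instead of waiting for the hrtimer or workqueue completion paths.
 */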
8048 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8050 struct scsi_device *sdp = cmnd->device;
8051 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8052 struct sdebug_err_inject *err;
8053 unsigned char *cmd = cmnd->cmnd;
8060 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8061 if (err->type == ERR_TMOUT_CMD &&
8062 (err->cmd == cmd[0] || err->cmd == 0xff)) {
8076 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8078 struct scsi_device *sdp = cmnd->device;
8079 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8080 struct sdebug_err_inject *err;
8081 unsigned char *cmd = cmnd->cmnd;
8088 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8089 if (err->type == ERR_FAIL_QUEUE_CMD &&
8090 (err->cmd == cmd[0] || err->cmd == 0xff)) {
8091 ret = err->cnt ? err->queuecmd_ret : 0;
8104 static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
8105 struct sdebug_err_inject *info)
8107 struct scsi_device *sdp = cmnd->device;
8108 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8109 struct sdebug_err_inject *err;
8110 unsigned char *cmd = cmnd->cmnd;
8118 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8119 if (err->type == ERR_FAIL_CMD &&
8120 (err->cmd == cmd[0] || err->cmd == 0xff)) {
8138 mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
8139 result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
8141 *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
8146 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
8147 struct scsi_cmnd *scp)
8150 struct scsi_device *sdp = scp->device;
8151 const struct opcode_info_t *oip;
8152 const struct opcode_info_t *r_oip;
8153 struct sdebug_dev_info *devip;
8154 u8 *cmd = scp->cmnd;
8155 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
8156 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
8159 u64 lun_index = sdp->lun & 0x3FFF;
8166 struct sdebug_err_inject err;
8168 scsi_set_resid(scp, 0);
8169 if (sdebug_statistics) {
8170 atomic_inc(&sdebug_cmnd_count);
8171 inject_now = inject_on_this_cmd();
8175 if (unlikely(sdebug_verbose &&
8176 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
8181 sb = (int)sizeof(b);
8183 strcpy(b, "too long, over 32 bytes");
8185 for (k = 0, n = 0; k < len && n < sb; ++k)
8186 n += scnprintf(b + n, sb - n, "%02x ",
8189 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
8190 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
8192 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
8193 return SCSI_MLQUEUE_HOST_BUSY;
8194 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
8195 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
8198 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
8199 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
8200 devip = (struct sdebug_dev_info *)sdp->hostdata;
8201 if (unlikely(!devip)) {
8202 devip = find_build_dev_info(sdp);
8207 if (sdebug_timeout_cmd(scp)) {
8208 scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
8212 ret = sdebug_fail_queue_cmd(scp);
8214 scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
8219 if (sdebug_fail_cmd(scp, &ret, &err)) {
8220 scmd_printk(KERN_INFO, scp,
8221 "fail command 0x%x with hostbyte=0x%x, "
8222 "driverbyte=0x%x, statusbyte=0x%x, "
8223 "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
8224 opcode, err.host_byte, err.driver_byte,
8225 err.status_byte, err.sense_key, err.asc, err.asq);
8229 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
8230 atomic_set(&sdeb_inject_pending, 1);
8232 na = oip->num_attached;
8234 if (na) { /* multiple commands with this opcode */
8236 if (FF_SA & r_oip->flags) {
8237 if (F_SA_LOW & oip->flags)
8240 sa = get_unaligned_be16(cmd + 8);
8241 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8242 if (opcode == oip->opcode && sa == oip->sa)
8245 } else { /* since no service action only check opcode */
8246 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
8247 if (opcode == oip->opcode)
8252 if (F_SA_LOW & r_oip->flags)
8253 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
8254 else if (F_SA_HIGH & r_oip->flags)
8255 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
8257 mk_sense_invalid_opcode(scp);
8260 } /* else (when na==0) we assume the oip is a match */
8262 if (unlikely(F_INV_OP & flags)) {
8263 mk_sense_invalid_opcode(scp);
8266 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
8268 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
8269 my_name, opcode, " supported for wlun");
8270 mk_sense_invalid_opcode(scp);
8273 if (unlikely(sdebug_strict)) { /* check cdb against mask */
8277 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
8278 rem = ~oip->len_mask[k] & cmd[k];
8280 for (j = 7; j >= 0; --j, rem <<= 1) {
8284 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
8289 if (unlikely(!(F_SKIP_UA & flags) &&
8290 find_first_bit(devip->uas_bm,
8291 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
8292 errsts = make_ua(scp, devip);
8296 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
8297 atomic_read(&devip->stopped))) {
8298 errsts = resp_not_ready(scp, devip);
8302 if (sdebug_fake_rw && (F_FAKE_RW & flags))
8304 if (unlikely(sdebug_every_nth)) {
8305 if (fake_timeout(scp))
8306 return 0; /* ignore command: make trouble */
8308 if (likely(oip->pfp))
8309 pfp = oip->pfp; /* calls a resp_* function */
8311 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
8314 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
8315 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
8316 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
8317 sdebug_ndelay > 10000)) {
8319 * Skip long delays if ndelay <= 10 microseconds. Otherwise
8320 * for Start Stop Unit (SSU) want at least 1 second delay and
8321 * if sdebug_jdelay>1 want a long delay of that many seconds.
8322 * For Synchronize Cache want 1/20 of SSU's delay.
8324 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
8325 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
8327 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
8328 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
8330 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
8333 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
8335 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
8338 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8340 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8342 spin_lock_init(&sdsc->lock);
8347 static struct scsi_host_template sdebug_driver_template = {
8348 .show_info = scsi_debug_show_info,
8349 .write_info = scsi_debug_write_info,
8350 .proc_name = sdebug_proc_name,
8351 .name = "SCSI DEBUG",
8352 .info = scsi_debug_info,
8353 .slave_alloc = scsi_debug_slave_alloc,
8354 .slave_configure = scsi_debug_slave_configure,
8355 .slave_destroy = scsi_debug_slave_destroy,
8356 .ioctl = scsi_debug_ioctl,
8357 .queuecommand = scsi_debug_queuecommand,
8358 .change_queue_depth = sdebug_change_qdepth,
8359 .map_queues = sdebug_map_queues,
8360 .mq_poll = sdebug_blk_mq_poll,
8361 .eh_abort_handler = scsi_debug_abort,
8362 .eh_device_reset_handler = scsi_debug_device_reset,
8363 .eh_target_reset_handler = scsi_debug_target_reset,
8364 .eh_bus_reset_handler = scsi_debug_bus_reset,
8365 .eh_host_reset_handler = scsi_debug_host_reset,
8366 .can_queue = SDEBUG_CANQUEUE,
8368 .sg_tablesize = SG_MAX_SEGMENTS,
8369 .cmd_per_lun = DEF_CMD_PER_LUN,
8371 .max_segment_size = -1U,
8372 .module = THIS_MODULE,
8373 .track_queue_depth = 1,
8374 .cmd_size = sizeof(struct sdebug_scsi_cmd),
8375 .init_cmd_priv = sdebug_init_cmd_priv,
8376 .target_alloc = sdebug_target_alloc,
8377 .target_destroy = sdebug_target_destroy,
8380 static int sdebug_driver_probe(struct device *dev)
8383 struct sdebug_host_info *sdbg_host;
8384 struct Scsi_Host *hpnt;
8387 sdbg_host = dev_to_sdebug_host(dev);
8389 sdebug_driver_template.can_queue = sdebug_max_queue;
8390 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
8391 if (!sdebug_clustering)
8392 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
8394 hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
8396 pr_err("scsi_host_alloc failed\n");
8400 if (submit_queues > nr_cpu_ids) {
8401 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
8402 my_name, submit_queues, nr_cpu_ids);
8403 submit_queues = nr_cpu_ids;
8406 * Decide whether to tell scsi subsystem that we want mq. The
8407 * following should give the same answer for each host.
8409 hpnt->nr_hw_queues = submit_queues;
8410 if (sdebug_host_max_queue)
8411 hpnt->host_tagset = 1;
8413 /* poll queues are possible for nr_hw_queues > 1 */
8414 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
8415 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
8416 my_name, poll_queues, hpnt->nr_hw_queues);
8421 * Poll queues don't need interrupts, but we need at least one I/O queue
8422 * left over for non-polled I/O.
8423 * If condition not met, trim poll_queues to 1 (just for simplicity).
8425 if (poll_queues >= submit_queues) {
8426 if (submit_queues < 3)
8427 pr_warn("%s: trim poll_queues to 1\n", my_name);
8429 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
8430 my_name, submit_queues - 1);
8436 sdbg_host->shost = hpnt;
8437 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
8438 hpnt->max_id = sdebug_num_tgts + 1;
8440 hpnt->max_id = sdebug_num_tgts;
8441 /* = sdebug_max_luns; */
8442 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
8446 switch (sdebug_dif) {
8448 case T10_PI_TYPE1_PROTECTION:
8449 hprot = SHOST_DIF_TYPE1_PROTECTION;
8451 hprot |= SHOST_DIX_TYPE1_PROTECTION;
8454 case T10_PI_TYPE2_PROTECTION:
8455 hprot = SHOST_DIF_TYPE2_PROTECTION;
8457 hprot |= SHOST_DIX_TYPE2_PROTECTION;
8460 case T10_PI_TYPE3_PROTECTION:
8461 hprot = SHOST_DIF_TYPE3_PROTECTION;
8463 hprot |= SHOST_DIX_TYPE3_PROTECTION;
8468 hprot |= SHOST_DIX_TYPE0_PROTECTION;
8472 scsi_host_set_prot(hpnt, hprot);
8474 if (have_dif_prot || sdebug_dix)
8475 pr_info("host protection%s%s%s%s%s%s%s\n",
8476 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
8477 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
8478 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
8479 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
8480 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
8481 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
8482 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
8484 if (sdebug_guard == 1)
8485 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
8487 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
8489 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
8490 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
8491 if (sdebug_every_nth) /* need stats counters for every_nth */
8492 sdebug_statistics = true;
8493 error = scsi_add_host(hpnt, &sdbg_host->dev);
8495 pr_err("scsi_add_host failed\n");
8497 scsi_host_put(hpnt);
8499 scsi_scan_host(hpnt);
8505 static void sdebug_driver_remove(struct device *dev)
8507 struct sdebug_host_info *sdbg_host;
8508 struct sdebug_dev_info *sdbg_devinfo, *tmp;
8510 sdbg_host = dev_to_sdebug_host(dev);
8512 scsi_remove_host(sdbg_host->shost);
8514 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8516 list_del(&sdbg_devinfo->dev_list);
8517 kfree(sdbg_devinfo->zstate);
8518 kfree(sdbg_devinfo);
8521 scsi_host_put(sdbg_host->shost);
8524 static struct bus_type pseudo_lld_bus = {
8526 .probe = sdebug_driver_probe,
8527 .remove = sdebug_driver_remove,
8528 .drv_groups = sdebug_drv_groups,