// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
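/*
 * Typical usage (an illustrative example only; see the documentation URL
 * above for the full module parameter list):
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=1 max_luns=2
 * creates one pseudo host whose RAM-backed logical units exercise the SCSI
 * mid-level without any real hardware.
 */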
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
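/* Note on the delay defaults above: jdelay counts in jiffies and ndelay in
 * nanoseconds; setting one is assumed to override the other (see
 * JDELAY_OVERRIDDEN). With jdelay=0 and ndelay=0 command responses are
 * completed inline rather than being deferred.
 */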
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
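/* Worked example: on a typical 64 bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE = 3 * 64 = 192 queued commands per submit queue, and the
 * in_use_bm[] bitmaps below span exactly SDEBUG_CANQUEUE_WORDS longs.
 */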
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};

static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
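/* Illustrative sketch (inferred from the comment above, not a real call
 * site): a response function honouring an IMMED bit might end with
 *	return res | SDEG_RES_IMMED_MASK;
 * leaving the mid-level's tuple in the low bits while bit 30 requests the
 * shortened (immediate) completion path.
 */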
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};

/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },

	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
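/* These results follow the SCSI mid-level convention: the host byte (DID_*)
 * occupies bits 23:16 and the SCSI status byte (SAM_STAT_*) bits 7:0, so
 * e.g. illegal_condition_result above packs DID_ABORT with CHECK CONDITION.
 */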
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}

static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 18;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}

static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

1560 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1562 unsigned char pq_pdt;
1564 unsigned char *cmd = scp->cmnd;
1565 int alloc_len, n, ret;
1566 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1568 alloc_len = get_unaligned_be16(cmd + 3);
1569 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1571 return DID_REQUEUE << 16;
1572 is_disk = (sdebug_ptype == TYPE_DISK);
1573 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1574 is_disk_zbc = (is_disk || is_zbc);
1575 have_wlun = scsi_is_wlun(scp->device->lun);
1577 pq_pdt = TYPE_WLUN; /* present, wlun */
1578 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1579 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1581 pq_pdt = (sdebug_ptype & 0x1f);
1583 if (0x2 & cmd[1]) { /* CMDDT bit set */
1584 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1586 return check_condition_result;
1587 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1588 int lu_id_num, port_group_id, target_dev_id, len;
1590 int host_no = devip->sdbg_host->shost->host_no;
1592 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1593 (devip->channel & 0x7f);
1594 if (sdebug_vpd_use_hostno == 0)
1596 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1597 (devip->target * 1000) + devip->lun);
1598 target_dev_id = ((host_no + 1) * 2000) +
1599 (devip->target * 1000) - 3;
1600 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1601 if (0 == cmd[2]) { /* supported vital product data pages */
1602 arr[1] = cmd[2]; /*sanity */
1604 arr[n++] = 0x0; /* this page */
1605 arr[n++] = 0x80; /* unit serial number */
1606 arr[n++] = 0x83; /* device identification */
1607 arr[n++] = 0x84; /* software interface ident. */
1608 arr[n++] = 0x85; /* management network addresses */
1609 arr[n++] = 0x86; /* extended inquiry */
1610 arr[n++] = 0x87; /* mode page policy */
1611 arr[n++] = 0x88; /* SCSI ports */
1612 if (is_disk_zbc) { /* SBC or ZBC */
1613 arr[n++] = 0x89; /* ATA information */
1614 arr[n++] = 0xb0; /* Block limits */
1615 arr[n++] = 0xb1; /* Block characteristics */
1617 arr[n++] = 0xb2; /* LB Provisioning */
1619 arr[n++] = 0xb6; /* ZB dev. char. */
1621 arr[3] = n - 4; /* number of supported VPD pages */
1622 } else if (0x80 == cmd[2]) { /* unit serial number */
1623 arr[1] = cmd[2]; /*sanity */
1625 memcpy(&arr[4], lu_id_str, len);
1626 } else if (0x83 == cmd[2]) { /* device identification */
1627 arr[1] = cmd[2]; /*sanity */
1628 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1629 target_dev_id, lu_id_num,
1632 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1633 arr[1] = cmd[2]; /*sanity */
1634 arr[3] = inquiry_vpd_84(&arr[4]);
1635 } else if (0x85 == cmd[2]) { /* Management network addresses */
1636 arr[1] = cmd[2]; /*sanity */
1637 arr[3] = inquiry_vpd_85(&arr[4]);
1638 } else if (0x86 == cmd[2]) { /* extended inquiry */
1639 arr[1] = cmd[2]; /*sanity */
1640 arr[3] = 0x3c; /* number of following entries */
1641 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1642 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1643 else if (have_dif_prot)
1644 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1646 arr[4] = 0x0; /* no protection stuff */
1647 arr[5] = 0x7; /* head of q, ordered + simple q's */
1648 } else if (0x87 == cmd[2]) { /* mode page policy */
1649 arr[1] = cmd[2]; /* sanity */
1650 arr[3] = 0x8; /* number of following entries */
1651 arr[4] = 0x2; /* disconnect-reconnect mp */
1652 arr[6] = 0x80; /* mlus, shared */
1653 arr[8] = 0x18; /* protocol specific lu */
1654 arr[10] = 0x82; /* mlus, per initiator port */
1655 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1656 arr[1] = cmd[2]; /* sanity */
1657 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1658 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1659 arr[1] = cmd[2]; /* sanity */
1660 n = inquiry_vpd_89(&arr[4]);
1661 put_unaligned_be16(n, arr + 2);
1662 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1663 arr[1] = cmd[2]; /* sanity */
1664 arr[3] = inquiry_vpd_b0(&arr[4]);
1665 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1666 arr[1] = cmd[2]; /* sanity */
1667 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1668 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1669 arr[1] = cmd[2]; /* sanity */
1670 arr[3] = inquiry_vpd_b2(&arr[4]);
1671 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1672 arr[1] = cmd[2]; /* sanity */
1673 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1675 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1677 return check_condition_result;
1679 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1680 ret = fill_from_dev_buffer(scp, arr,
1681 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1685 /* drops through here for a standard inquiry */
1686 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1687 arr[2] = sdebug_scsi_level;
1688 arr[3] = 2; /* response_data_format==2 */
1689 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1690 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1691 if (sdebug_vpd_use_hostno == 0)
1692 arr[5] |= 0x10; /* claim: implicit TPGS */
1693 arr[6] = 0x10; /* claim: MultiP */
1694 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1695 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1696 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1697 memcpy(&arr[16], sdebug_inq_product_id, 16);
1698 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1699 /* Use Vendor Specific area to place driver date in ASCII */
1700 memcpy(&arr[36], sdebug_version_date, 8);
1701 /* version descriptors (2 bytes each) follow */
1702 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1703 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1705 if (is_disk) { /* SBC-4 no version claimed */
1706 put_unaligned_be16(0x600, arr + n);
1708 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1709 put_unaligned_be16(0x525, arr + n);
1711 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1712 put_unaligned_be16(0x624, arr + n);
1715 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1716 ret = fill_from_dev_buffer(scp, arr,
1717 min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
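/*
 * For reference, a minimal user-space sketch (not part of this driver) of
 * issuing the standard INQUIRY served above through the SG_IO ioctl; the
 * /dev/sg0 device node name is an assumption.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int main(void)
 *	{
 *		unsigned char cdb[6] = {0x12, 0, 0, 0, 96, 0}; // INQUIRY, 96 bytes
 *		unsigned char resp[96], sense[32];
 *		struct sg_io_hdr io;
 *		int fd = open("/dev/sg0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&io, 0, sizeof(io));
 *		io.interface_id = 'S';
 *		io.cmdp = cdb;
 *		io.cmd_len = sizeof(cdb);
 *		io.dxfer_direction = SG_DXFER_FROM_DEV;
 *		io.dxferp = resp;
 *		io.dxfer_len = sizeof(resp);
 *		io.sbp = sense;
 *		io.mx_sb_len = sizeof(sense);
 *		io.timeout = 5000; // milliseconds
 *		if (ioctl(fd, SG_IO, &io) == 0)
 *			printf("%.8s %.16s %.4s\n", resp + 8, resp + 16, resp + 32);
 *		return 0;
 *	}
 */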
1722 /* See resp_iec_m_pg() for how this data is manipulated */
1723 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1726 static int resp_requests(struct scsi_cmnd *scp,
1727 struct sdebug_dev_info *devip)
1729 unsigned char *cmd = scp->cmnd;
1730 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1731 bool dsense = !!(cmd[1] & 1);
1732 int alloc_len = cmd[4];
1734 int stopped_state = atomic_read(&devip->stopped);
1736 memset(arr, 0, sizeof(arr));
1737 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1741 arr[2] = LOGICAL_UNIT_NOT_READY;
1742 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1746 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1747 arr[7] = 0xa; /* 18 byte sense buffer */
1748 arr[12] = LOGICAL_UNIT_NOT_READY;
1749 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1751 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1752 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1755 arr[1] = 0x0; /* NO_SENSE in sense_key */
1756 arr[2] = THRESHOLD_EXCEEDED;
1757 arr[3] = 0xff; /* Failure prediction (false) */
1761 arr[2] = 0x0; /* NO_SENSE in sense_key */
1762 arr[7] = 0xa; /* 18 byte sense buffer */
1763 arr[12] = THRESHOLD_EXCEEDED;
1764 arr[13] = 0xff; /* Failure prediction (false) */
1766 } else { /* nothing to report */
1769 memset(arr, 0, len);
1772 memset(arr, 0, len);
1777 return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1780 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1782 unsigned char *cmd = scp->cmnd;
1783 int power_cond, want_stop, stopped_state;
1786 power_cond = (cmd[4] & 0xf0) >> 4;
1788 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1789 return check_condition_result;
1791 want_stop = !(cmd[4] & 1);
1792 stopped_state = atomic_read(&devip->stopped);
1793 if (stopped_state == 2) {
1794 ktime_t now_ts = ktime_get_boottime();
1796 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1797 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1799 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1800 /* tur_ms_to_ready delay has expired */
1801 atomic_set(&devip->stopped, 0);
1805 if (stopped_state == 2) {
1807 stopped_state = 1; /* dummy up success */
1808 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1809 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1810 return check_condition_result;
1814 changing = (stopped_state != want_stop);
1816 atomic_xchg(&devip->stopped, want_stop);
1817 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1818 return SDEG_RES_IMMED_MASK;
1823 static sector_t get_sdebug_capacity(void)
1825 static const unsigned int gibibyte = 1073741824;
1827 if (sdebug_virtual_gb > 0)
1828 return (sector_t)sdebug_virtual_gb *
1829 (gibibyte / sdebug_sector_size);
1831 return sdebug_store_sectors;
1834 #define SDEBUG_READCAP_ARR_SZ 8
1835 static int resp_readcap(struct scsi_cmnd *scp,
1836 struct sdebug_dev_info *devip)
1838 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1841 /* following just in case virtual_gb changed */
1842 sdebug_capacity = get_sdebug_capacity();
1843 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1844 if (sdebug_capacity < 0xffffffff) {
1845 capac = (unsigned int)sdebug_capacity - 1;
1846 put_unaligned_be32(capac, arr + 0);
1848 put_unaligned_be32(0xffffffff, arr + 0);
1849 put_unaligned_be16(sdebug_sector_size, arr + 6);
1850 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
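/*
 * READ CAPACITY(10) can only report a 32-bit last LBA. Worked example:
 * virtual_gb=2048 with 512-byte sectors gives 2 TiB / 512 = 0x100000000
 * blocks, so the last LBA does not fit; 0xffffffff is returned and the
 * initiator is expected to fall back to READ CAPACITY(16) below.
 */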
1853 #define SDEBUG_READCAP16_ARR_SZ 32
1854 static int resp_readcap16(struct scsi_cmnd *scp,
1855 struct sdebug_dev_info *devip)
1857 unsigned char *cmd = scp->cmnd;
1858 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1861 alloc_len = get_unaligned_be32(cmd + 10);
1862 /* following just in case virtual_gb changed */
1863 sdebug_capacity = get_sdebug_capacity();
1864 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1865 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1866 put_unaligned_be32(sdebug_sector_size, arr + 8);
1867 arr[13] = sdebug_physblk_exp & 0xf;
1868 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1870 if (scsi_debug_lbp()) {
1871 arr[14] |= 0x80; /* LBPME */
1872 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1873 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1874 * in the wider field maps to 0 in this field.
1876 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1880 arr[15] = sdebug_lowest_aligned & 0xff;
1882 if (have_dif_prot) {
1883 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1884 arr[12] |= 1; /* PROT_EN */
1887 return fill_from_dev_buffer(scp, arr,
1888 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1891 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1893 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1894 struct sdebug_dev_info *devip)
1896 unsigned char *cmd = scp->cmnd;
1898 int host_no = devip->sdbg_host->shost->host_no;
1899 int port_group_a, port_group_b, port_a, port_b;
1903 alen = get_unaligned_be32(cmd + 6);
1904 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1906 return DID_REQUEUE << 16;
1908 * EVPD page 0x88 states we have two ports, one
1909 * real and a fake port with no device connected.
1910 * So we create two port groups with one port each
1911 * and set the group with port B to unavailable.
1913 port_a = 0x1; /* relative port A */
1914 port_b = 0x2; /* relative port B */
1915 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1916 (devip->channel & 0x7f);
1917 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1918 (devip->channel & 0x7f) + 0x80;
1921 * The asymmetric access state is cycled according to the host_id.
1924 if (sdebug_vpd_use_hostno == 0) {
1925 arr[n++] = host_no % 3; /* Asymm access state */
1926 arr[n++] = 0x0F; /* claim: all states are supported */
1928 arr[n++] = 0x0; /* Active/Optimized path */
1929 arr[n++] = 0x01; /* only support active/optimized paths */
1931 put_unaligned_be16(port_group_a, arr + n);
1933 arr[n++] = 0; /* Reserved */
1934 arr[n++] = 0; /* Status code */
1935 arr[n++] = 0; /* Vendor unique */
1936 arr[n++] = 0x1; /* One port per group */
1937 arr[n++] = 0; /* Reserved */
1938 arr[n++] = 0; /* Reserved */
1939 put_unaligned_be16(port_a, arr + n);
1941 arr[n++] = 3; /* Port unavailable */
1942 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1943 put_unaligned_be16(port_group_b, arr + n);
1945 arr[n++] = 0; /* Reserved */
1946 arr[n++] = 0; /* Status code */
1947 arr[n++] = 0; /* Vendor unique */
1948 arr[n++] = 0x1; /* One port per group */
1949 arr[n++] = 0; /* Reserved */
1950 arr[n++] = 0; /* Reserved */
1951 put_unaligned_be16(port_b, arr + n);
1955 put_unaligned_be32(rlen, arr + 0);
1958 * Return the smallest of:
1959 * - the allocated length,
1960 * - the constructed response length (n), and
1961 * - the maximum array size.
1963 rlen = min(alen, n);
1964 ret = fill_from_dev_buffer(scp, arr,
1965 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
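/*
 * Worked example of the group encoding above: with host_no=0 and
 * channel=0, port_group_a = ((0 + 1) & 0x7f) << 8 = 0x100 and
 * port_group_b = 0x100 + 0x80 = 0x180, each holding one relative port
 * (0x1 and 0x2 respectively); group B is always reported as
 * unavailable, matching the two-port claim of VPD page 0x88.
 */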
1970 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1971 struct sdebug_dev_info *devip)
1974 u8 reporting_opts, req_opcode, sdeb_i, supp;
1976 u32 alloc_len, a_len;
1977 int k, offset, len, errsts, count, bump, na;
1978 const struct opcode_info_t *oip;
1979 const struct opcode_info_t *r_oip;
1981 u8 *cmd = scp->cmnd;
1983 rctd = !!(cmd[2] & 0x80);
1984 reporting_opts = cmd[2] & 0x7;
1985 req_opcode = cmd[3];
1986 req_sa = get_unaligned_be16(cmd + 4);
1987 alloc_len = get_unaligned_be32(cmd + 6);
1988 if (alloc_len < 4 || alloc_len > 0xffff) {
1989 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1990 return check_condition_result;
1992 if (alloc_len > 8192)
1996 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1998 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2000 return check_condition_result;
2002 switch (reporting_opts) {
2003 case 0: /* all commands */
2004 /* count number of commands */
2005 for (count = 0, oip = opcode_info_arr;
2006 oip->num_attached != 0xff; ++oip) {
2007 if (F_INV_OP & oip->flags)
2009 count += (oip->num_attached + 1);
2011 bump = rctd ? 20 : 8;
2012 put_unaligned_be32(count * bump, arr);
2013 for (offset = 4, oip = opcode_info_arr;
2014 oip->num_attached != 0xff && offset < a_len; ++oip) {
2015 if (F_INV_OP & oip->flags)
2017 na = oip->num_attached;
2018 arr[offset] = oip->opcode;
2019 put_unaligned_be16(oip->sa, arr + offset + 2);
2021 arr[offset + 5] |= 0x2;
2022 if (FF_SA & oip->flags)
2023 arr[offset + 5] |= 0x1;
2024 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2026 put_unaligned_be16(0xa, arr + offset + 8);
2028 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2029 if (F_INV_OP & oip->flags)
2032 arr[offset] = oip->opcode;
2033 put_unaligned_be16(oip->sa, arr + offset + 2);
2035 arr[offset + 5] |= 0x2;
2036 if (FF_SA & oip->flags)
2037 arr[offset + 5] |= 0x1;
2038 put_unaligned_be16(oip->len_mask[0],
2041 put_unaligned_be16(0xa,
2048 case 1: /* one command: opcode only */
2049 case 2: /* one command: opcode plus service action */
2050 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2051 sdeb_i = opcode_ind_arr[req_opcode];
2052 oip = &opcode_info_arr[sdeb_i];
2053 if (F_INV_OP & oip->flags) {
2057 if (1 == reporting_opts) {
2058 if (FF_SA & oip->flags) {
2059 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2062 return check_condition_result;
2065 } else if (2 == reporting_opts &&
2066 0 == (FF_SA & oip->flags)) {
2067 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2068 kfree(arr); /* field pointer above indicates the requested sa */
2069 return check_condition_result;
2071 if (0 == (FF_SA & oip->flags) &&
2072 req_opcode == oip->opcode)
2074 else if (0 == (FF_SA & oip->flags)) {
2075 na = oip->num_attached;
2076 for (k = 0, oip = oip->arrp; k < na;
2078 if (req_opcode == oip->opcode)
2081 supp = (k >= na) ? 1 : 3;
2082 } else if (req_sa != oip->sa) {
2083 na = oip->num_attached;
2084 for (k = 0, oip = oip->arrp; k < na;
2086 if (req_sa == oip->sa)
2089 supp = (k >= na) ? 1 : 3;
2093 u = oip->len_mask[0];
2094 put_unaligned_be16(u, arr + 2);
2095 arr[4] = oip->opcode;
2096 for (k = 1; k < u; ++k)
2097 arr[4 + k] = (k < 16) ?
2098 oip->len_mask[k] : 0xff;
2103 arr[1] = (rctd ? 0x80 : 0) | supp;
2105 put_unaligned_be16(0xa, arr + offset);
2110 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2112 return check_condition_result;
2114 offset = (offset < a_len) ? offset : a_len;
2115 len = (offset < alloc_len) ? offset : alloc_len;
2116 errsts = fill_from_dev_buffer(scp, arr, len);
2121 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2122 struct sdebug_dev_info *devip)
2127 u8 *cmd = scp->cmnd;
2129 memset(arr, 0, sizeof(arr));
2130 repd = !!(cmd[2] & 0x80);
2131 alloc_len = get_unaligned_be32(cmd + 6);
2132 if (alloc_len < 4) {
2133 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2134 return check_condition_result;
2136 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2137 arr[1] = 0x1; /* ITNRS */
2144 len = (len < alloc_len) ? len : alloc_len;
2145 return fill_from_dev_buffer(scp, arr, len);
2148 /* <<Following mode page info copied from ST318451LW>> */
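/*
 * Each resp_*_pg() helper below builds one mode page at p and returns
 * its length. The pcontrol argument mirrors the MODE SENSE PC field:
 * 0 yields current values, 1 the changeable-bits mask (non-changeable
 * bytes zeroed), 2 default values; 3 (saved values) is rejected in
 * resp_mode_sense() with SAVING PARAMETERS NOT SUPPORTED.
 */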
2150 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2151 { /* Read-Write Error Recovery page for mode_sense */
2152 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2155 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2157 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2158 return sizeof(err_recov_pg);
2161 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2162 { /* Disconnect-Reconnect page for mode_sense */
2163 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2164 0, 0, 0, 0, 0, 0, 0, 0};
2166 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2168 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2169 return sizeof(disconnect_pg);
2172 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2173 { /* Format device page for mode_sense */
2174 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2175 0, 0, 0, 0, 0, 0, 0, 0,
2176 0, 0, 0, 0, 0x40, 0, 0, 0};
2178 memcpy(p, format_pg, sizeof(format_pg));
2179 put_unaligned_be16(sdebug_sectors_per, p + 10);
2180 put_unaligned_be16(sdebug_sector_size, p + 12);
2181 if (sdebug_removable)
2182 p[20] |= 0x20; /* should agree with INQUIRY */
2184 memset(p + 2, 0, sizeof(format_pg) - 2);
2185 return sizeof(format_pg);
2188 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2189 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2192 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2193 { /* Caching page for mode_sense */
2194 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2196 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2197 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2199 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2200 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2201 memcpy(p, caching_pg, sizeof(caching_pg));
2203 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2204 else if (2 == pcontrol)
2205 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2206 return sizeof(caching_pg);
2209 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2213 { /* Control mode page for mode_sense */
2214 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2216 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2220 ctrl_m_pg[2] |= 0x4;
2222 ctrl_m_pg[2] &= ~0x4;
2225 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2227 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2229 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2230 else if (2 == pcontrol)
2231 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2232 return sizeof(ctrl_m_pg);
2236 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2237 { /* Informational Exceptions control mode page for mode_sense */
2238 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2240 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2243 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2245 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2246 else if (2 == pcontrol)
2247 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2248 return sizeof(iec_m_pg);
2251 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2252 { /* SAS SSP mode page - short format for mode_sense */
2253 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2254 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2256 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2258 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2259 return sizeof(sas_sf_m_pg);
2263 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2265 { /* SAS phy control and discover mode page for mode_sense */
2266 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2267 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2268 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2269 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2270 0x2, 0, 0, 0, 0, 0, 0, 0,
2271 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2272 0, 0, 0, 0, 0, 0, 0, 0,
2273 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2274 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2275 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2276 0x3, 0, 0, 0, 0, 0, 0, 0,
2277 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2278 0, 0, 0, 0, 0, 0, 0, 0,
2282 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2283 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2286 port_a = target_dev_id + 1;
2287 port_b = port_a + 1;
2288 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2289 put_unaligned_be32(port_a, p + 20);
2290 put_unaligned_be32(port_b, p + 48 + 20);
2292 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2293 return sizeof(sas_pcd_m_pg);
2296 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2297 { /* SAS SSP shared protocol specific port mode subpage */
2298 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2299 0, 0, 0, 0, 0, 0, 0, 0,
2302 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2304 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2305 return sizeof(sas_sha_m_pg);
2308 #define SDEBUG_MAX_MSENSE_SZ 256
2310 static int resp_mode_sense(struct scsi_cmnd *scp,
2311 struct sdebug_dev_info *devip)
2313 int pcontrol, pcode, subpcode, bd_len;
2314 unsigned char dev_spec;
2315 int alloc_len, offset, len, target_dev_id;
2316 int target = scp->device->id;
2318 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2319 unsigned char *cmd = scp->cmnd;
2320 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2322 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2323 pcontrol = (cmd[2] & 0xc0) >> 6;
2324 pcode = cmd[2] & 0x3f;
2326 msense_6 = (MODE_SENSE == cmd[0]);
2327 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2328 is_disk = (sdebug_ptype == TYPE_DISK);
2329 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2330 if ((is_disk || is_zbc) && !dbd)
2331 bd_len = llbaa ? 16 : 8;
2334 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2335 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2336 if (0x3 == pcontrol) { /* Saving values not supported */
2337 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2338 return check_condition_result;
2340 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2341 (devip->target * 1000) - 3;
2342 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2343 if (is_disk || is_zbc) {
2344 dev_spec = 0x10; /* would be 0x90 if WP=1 (read-only) */
2356 arr[4] = 0x1; /* set LONGLBA bit */
2357 arr[7] = bd_len; /* assume 255 or less */
2361 if ((bd_len > 0) && (!sdebug_capacity))
2362 sdebug_capacity = get_sdebug_capacity();
2365 if (sdebug_capacity > 0xfffffffe)
2366 put_unaligned_be32(0xffffffff, ap + 0);
2368 put_unaligned_be32(sdebug_capacity, ap + 0);
2369 put_unaligned_be16(sdebug_sector_size, ap + 6);
2372 } else if (16 == bd_len) {
2373 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2374 put_unaligned_be32(sdebug_sector_size, ap + 12);
2379 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2380 /* TODO: Control Extension page */
2381 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2382 return check_condition_result;
2387 case 0x1: /* Read-Write error recovery page, direct access */
2388 len = resp_err_recov_pg(ap, pcontrol, target);
2391 case 0x2: /* Disconnect-Reconnect page, all devices */
2392 len = resp_disconnect_pg(ap, pcontrol, target);
2395 case 0x3: /* Format device page, direct access */
2397 len = resp_format_pg(ap, pcontrol, target);
2402 case 0x8: /* Caching page, direct access */
2403 if (is_disk || is_zbc) {
2404 len = resp_caching_pg(ap, pcontrol, target);
2409 case 0xa: /* Control Mode page, all devices */
2410 len = resp_ctrl_m_pg(ap, pcontrol, target);
2413 case 0x19: /* if spc==1 then sas phy, control+discover */
2414 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2415 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2416 return check_condition_result;
2419 if ((0x0 == subpcode) || (0xff == subpcode))
2420 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2421 if ((0x1 == subpcode) || (0xff == subpcode))
2422 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2424 if ((0x2 == subpcode) || (0xff == subpcode))
2425 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2428 case 0x1c: /* Informational Exceptions Mode page, all devices */
2429 len = resp_iec_m_pg(ap, pcontrol, target);
2432 case 0x3f: /* Read all Mode pages */
2433 if ((0 == subpcode) || (0xff == subpcode)) {
2434 len = resp_err_recov_pg(ap, pcontrol, target);
2435 len += resp_disconnect_pg(ap + len, pcontrol, target);
2437 len += resp_format_pg(ap + len, pcontrol,
2439 len += resp_caching_pg(ap + len, pcontrol,
2441 } else if (is_zbc) {
2442 len += resp_caching_pg(ap + len, pcontrol,
2445 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2446 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2447 if (0xff == subpcode) {
2448 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2449 target, target_dev_id);
2450 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2452 len += resp_iec_m_pg(ap + len, pcontrol, target);
2455 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2456 return check_condition_result;
2464 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2465 return check_condition_result;
2468 arr[0] = offset - 1;
2470 put_unaligned_be16((offset - 2), arr + 0);
2471 return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
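/*
 * CDB decode example for the function above: MODE SENSE(10) with
 * cmd[2] = 0x48 gives pcontrol = 1 (changeable values) and pcode = 0x08
 * (caching page); the subpage code is carried in cmd[3].
 */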
2474 #define SDEBUG_MAX_MSELECT_SZ 512
2476 static int resp_mode_select(struct scsi_cmnd *scp,
2477 struct sdebug_dev_info *devip)
2479 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2480 int param_len, res, mpage;
2481 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2482 unsigned char *cmd = scp->cmnd;
2483 int mselect6 = (MODE_SELECT == cmd[0]);
2485 memset(arr, 0, sizeof(arr));
2488 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2489 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2490 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2491 return check_condition_result;
2493 res = fetch_to_dev_buffer(scp, arr, param_len);
2495 return DID_ERROR << 16;
2496 else if (sdebug_verbose && (res < param_len))
2497 sdev_printk(KERN_INFO, scp->device,
2498 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2499 __func__, param_len, res);
2500 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2501 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2503 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2504 return check_condition_result;
2506 off = bd_len + (mselect6 ? 4 : 8);
2507 mpage = arr[off] & 0x3f;
2508 ps = !!(arr[off] & 0x80);
2510 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2511 return check_condition_result;
2513 spf = !!(arr[off] & 0x40);
2514 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2516 if ((pg_len + off) > param_len) {
2517 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2518 PARAMETER_LIST_LENGTH_ERR, 0);
2519 return check_condition_result;
2522 case 0x8: /* Caching Mode page */
2523 if (caching_pg[1] == arr[off + 1]) {
2524 memcpy(caching_pg + 2, arr + off + 2,
2525 sizeof(caching_pg) - 2);
2526 goto set_mode_changed_ua;
2529 case 0xa: /* Control Mode page */
2530 if (ctrl_m_pg[1] == arr[off + 1]) {
2531 memcpy(ctrl_m_pg + 2, arr + off + 2,
2532 sizeof(ctrl_m_pg) - 2);
2533 if (ctrl_m_pg[4] & 0x8)
2537 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2538 goto set_mode_changed_ua;
2541 case 0x1c: /* Informational Exceptions Mode page */
2542 if (iec_m_pg[1] == arr[off + 1]) {
2543 memcpy(iec_m_pg + 2, arr + off + 2,
2544 sizeof(iec_m_pg) - 2);
2545 goto set_mode_changed_ua;
2551 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2552 return check_condition_result;
2553 set_mode_changed_ua:
2554 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
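/*
 * Summary of the flow above: the parameter list is fetched into arr,
 * the first mode page is located at off (past the header and any block
 * descriptors), and for the caching, control and informational
 * exceptions pages the bytes after the page header are copied into the
 * driver's static page images. SDEBUG_UA_MODE_CHANGED then queues a
 * MODE PARAMETERS CHANGED unit attention for the initiator.
 */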
2558 static int resp_temp_l_pg(unsigned char *arr)
2560 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2561 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2564 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2565 return sizeof(temp_l_pg);
2568 static int resp_ie_l_pg(unsigned char *arr)
2570 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2573 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2574 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2575 arr[4] = THRESHOLD_EXCEEDED;
2578 return sizeof(ie_l_pg);
2581 #define SDEBUG_MAX_LSENSE_SZ 512
2583 static int resp_log_sense(struct scsi_cmnd *scp,
2584 struct sdebug_dev_info *devip)
2586 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2587 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2588 unsigned char *cmd = scp->cmnd;
2590 memset(arr, 0, sizeof(arr));
2594 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2595 return check_condition_result;
2597 pcode = cmd[2] & 0x3f;
2598 subpcode = cmd[3] & 0xff;
2599 alloc_len = get_unaligned_be16(cmd + 7);
2601 if (0 == subpcode) {
2603 case 0x0: /* Supported log pages log page */
2605 arr[n++] = 0x0; /* this page */
2606 arr[n++] = 0xd; /* Temperature */
2607 arr[n++] = 0x2f; /* Informational exceptions */
2610 case 0xd: /* Temperature log page */
2611 arr[3] = resp_temp_l_pg(arr + 4);
2613 case 0x2f: /* Informational exceptions log page */
2614 arr[3] = resp_ie_l_pg(arr + 4);
2617 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2618 return check_condition_result;
2620 } else if (0xff == subpcode) {
2624 case 0x0: /* Supported log pages and subpages log page */
2627 arr[n++] = 0x0; /* 0,0 page */
2629 arr[n++] = 0xff; /* this page */
2631 arr[n++] = 0x0; /* Temperature */
2633 arr[n++] = 0x0; /* Informational exceptions */
2636 case 0xd: /* Temperature subpages */
2639 arr[n++] = 0x0; /* Temperature */
2642 case 0x2f: /* Informational exceptions subpages */
2645 arr[n++] = 0x0; /* Informational exceptions */
2649 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2650 return check_condition_result;
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2654 return check_condition_result;
2656 len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2657 return fill_from_dev_buffer(scp, arr,
2658 min_t(int, len, SDEBUG_MAX_LSENSE_SZ)); /* bound by arr[] size */
2661 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2663 return devip->nr_zones != 0;
2666 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2667 unsigned long long lba)
2669 return &devip->zstate[lba >> devip->zsize_shift];
2672 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2674 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2677 static void zbc_close_zone(struct sdebug_dev_info *devip,
2678 struct sdeb_zone_state *zsp)
2680 enum sdebug_z_cond zc;
2682 if (zbc_zone_is_conv(zsp))
2686 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2689 if (zc == ZC2_IMPLICIT_OPEN)
2690 devip->nr_imp_open--;
2692 devip->nr_exp_open--;
2694 if (zsp->z_wp == zsp->z_start) {
2695 zsp->z_cond = ZC1_EMPTY;
2697 zsp->z_cond = ZC4_CLOSED;
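/*
 * Zone condition state machine implemented by these helpers: an empty
 * zone (ZC1) becomes implicitly (ZC2) or explicitly (ZC3) open; closing
 * an open zone yields ZC1_EMPTY if the write pointer is still at the
 * zone start, else ZC4_CLOSED; a write that advances the write pointer
 * to the zone end makes it ZC5_FULL (see zbc_inc_wp() below).
 * Conventional zones take no part in any of this.
 */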
2702 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2704 struct sdeb_zone_state *zsp = &devip->zstate[0];
2707 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2708 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2709 zbc_close_zone(devip, zsp);
2715 static void zbc_open_zone(struct sdebug_dev_info *devip,
2716 struct sdeb_zone_state *zsp, bool explicit)
2718 enum sdebug_z_cond zc;
2720 if (zbc_zone_is_conv(zsp))
2724 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2725 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2728 /* Close an implicit open zone if necessary */
2729 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2730 zbc_close_zone(devip, zsp);
2731 else if (devip->max_open &&
2732 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2733 zbc_close_imp_open_zone(devip);
2735 if (zsp->z_cond == ZC4_CLOSED)
2738 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2739 devip->nr_exp_open++;
2741 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2742 devip->nr_imp_open++;
2746 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2747 unsigned long long lba, unsigned int num)
2749 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2750 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2752 if (zbc_zone_is_conv(zsp))
2755 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2757 if (zsp->z_wp >= zend)
2758 zsp->z_cond = ZC5_FULL;
2763 if (lba != zsp->z_wp)
2764 zsp->z_non_seq_resource = true;
2770 } else if (end > zsp->z_wp) {
2776 if (zsp->z_wp >= zend)
2777 zsp->z_cond = ZC5_FULL;
2783 zend = zsp->z_start + zsp->z_size;
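/*
 * Worked example for zbc_inc_wp() on a sequential-write-required zone
 * of 0x80 blocks starting at LBA 0x100: with z_wp at 0x170, a
 * 0x10-block write moves z_wp to 0x180 == zend and z_cond becomes
 * ZC5_FULL. For sequential-write-preferred zones, a write that does not
 * start at z_wp flags the zone as a non-sequential-write resource, and
 * z_wp only advances when the write ends beyond it.
 */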
2788 static int check_zbc_access_params(struct scsi_cmnd *scp,
2789 unsigned long long lba, unsigned int num, bool write)
2791 struct scsi_device *sdp = scp->device;
2792 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2793 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2794 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2797 if (devip->zmodel == BLK_ZONED_HA)
2799 /* For host-managed, reads cannot cross zone type boundaries */
2800 if (zsp_end != zsp &&
2801 zbc_zone_is_conv(zsp) &&
2802 !zbc_zone_is_conv(zsp_end)) {
2803 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2806 return check_condition_result;
2811 /* No restrictions for writes within conventional zones */
2812 if (zbc_zone_is_conv(zsp)) {
2813 if (!zbc_zone_is_conv(zsp_end)) {
2814 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2816 WRITE_BOUNDARY_ASCQ);
2817 return check_condition_result;
2822 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2823 /* Writes cannot cross sequential zone boundaries */
2824 if (zsp_end != zsp) {
2825 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2827 WRITE_BOUNDARY_ASCQ);
2828 return check_condition_result;
2830 /* Cannot write full zones */
2831 if (zsp->z_cond == ZC5_FULL) {
2832 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2833 INVALID_FIELD_IN_CDB, 0);
2834 return check_condition_result;
2836 /* Writes must be aligned to the zone WP */
2837 if (lba != zsp->z_wp) {
2838 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2840 UNALIGNED_WRITE_ASCQ);
2841 return check_condition_result;
2845 /* Handle implicit open of closed and empty zones */
2846 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2847 if (devip->max_open &&
2848 devip->nr_exp_open >= devip->max_open) {
2849 mk_sense_buffer(scp, DATA_PROTECT,
2852 return check_condition_result;
2854 zbc_open_zone(devip, zsp, false);
2860 static inline int check_device_access_params
2861 (struct scsi_cmnd *scp, unsigned long long lba,
2862 unsigned int num, bool write)
2864 struct scsi_device *sdp = scp->device;
2865 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2867 if (lba + num > sdebug_capacity) {
2868 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2869 return check_condition_result;
2871 /* transfer length excessive (tie in to block limits VPD page) */
2872 if (num > sdebug_store_sectors) {
2873 /* needs work to find which cdb byte 'num' comes from */
2874 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2875 return check_condition_result;
2877 if (write && unlikely(sdebug_wp)) {
2878 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2879 return check_condition_result;
2881 if (sdebug_dev_is_zoned(devip))
2882 return check_zbc_access_params(scp, lba, num, write);
2888 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2889 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2890 * that access any of the "stores" in struct sdeb_store_info should call this
2891 * function with bug_if_fake_rw set to true.
2893 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2894 bool bug_if_fake_rw)
2896 if (sdebug_fake_rw) {
2897 BUG_ON(bug_if_fake_rw); /* See note above */
2900 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2903 /* Returns number of bytes copied or -1 if error. */
2904 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2905 u32 sg_skip, u64 lba, u32 num, bool do_write)
2908 u64 block, rest = 0;
2909 enum dma_data_direction dir;
2910 struct scsi_data_buffer *sdb = &scp->sdb;
2914 dir = DMA_TO_DEVICE;
2915 write_since_sync = true;
2917 dir = DMA_FROM_DEVICE;
2920 if (!sdb->length || !sip)
2922 if (scp->sc_data_direction != dir)
2926 block = do_div(lba, sdebug_store_sectors);
2927 if (block + num > sdebug_store_sectors)
2928 rest = block + num - sdebug_store_sectors;
2930 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2931 fsp + (block * sdebug_sector_size),
2932 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2933 if (ret != (num - rest) * sdebug_sector_size)
2937 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2938 fsp, rest * sdebug_sector_size,
2939 sg_skip + ((num - rest) * sdebug_sector_size),
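/*
 * The rest/wrap-around handling above exists because LBAs are used
 * modulo sdebug_store_sectors. Example: with a 0x1000-sector store, a
 * 0x10-sector access at LBA 0xff8 yields block=0xff8 and rest=8, so
 * eight sectors are copied at the tail of the store and the remaining
 * eight from its start.
 */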
2946 /* Returns number of bytes copied or -1 if error. */
2947 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2949 struct scsi_data_buffer *sdb = &scp->sdb;
2953 if (scp->sc_data_direction != DMA_TO_DEVICE)
2955 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2956 num * sdebug_sector_size, 0, true);
2959 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2960 * arr into sip->storep+lba and return true. If comparison fails then
2961 * return false. */
2962 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2963 const u8 *arr, bool compare_only)
2966 u64 block, rest = 0;
2967 u32 store_blks = sdebug_store_sectors;
2968 u32 lb_size = sdebug_sector_size;
2969 u8 *fsp = sip->storep;
2971 block = do_div(lba, store_blks);
2972 if (block + num > store_blks)
2973 rest = block + num - store_blks;
2975 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2979 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2985 arr += num * lb_size;
2986 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2988 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2992 static __be16 dif_compute_csum(const void *buf, int len)
2997 csum = (__force __be16)ip_compute_csum(buf, len);
2999 csum = cpu_to_be16(crc_t10dif(buf, len));
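/*
 * For reference, a stand-alone sketch (illustrative only) of the
 * checksum computed by crc_t10dif(): CRC16 with polynomial 0x8bb7,
 * zero initial value, no reflection, no final XOR. A slow bitwise
 * user-space equivalent:
 *
 *	unsigned short crc16_t10dif(const unsigned char *buf, int len)
 *	{
 *		unsigned short crc = 0;
 *		int i, j;
 *
 *		for (i = 0; i < len; i++) {
 *			crc ^= (unsigned short)buf[i] << 8;
 *			for (j = 0; j < 8; j++)
 *				crc = (crc & 0x8000) ?
 *				      (crc << 1) ^ 0x8bb7 : crc << 1;
 *		}
 *		return crc;
 *	}
 *
 * The ip_compute_csum() branch above uses the Internet checksum instead
 * when the guard module parameter selects it.
 */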
3004 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3005 sector_t sector, u32 ei_lba)
3007 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3009 if (sdt->guard_tag != csum) {
3010 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3011 (unsigned long)sector,
3012 be16_to_cpu(sdt->guard_tag),
3016 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3017 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3018 pr_err("REF check failed on sector %lu\n",
3019 (unsigned long)sector);
3022 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3023 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3024 pr_err("REF check failed on sector %lu\n",
3025 (unsigned long)sector);
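/*
 * Layout of the 8-byte tuple checked above (struct t10_pi_tuple):
 * bytes 0-1 guard tag (checksum over the data block), bytes 2-3
 * application tag, bytes 4-7 reference tag. An application tag of
 * 0xffff is the escape value and disables checking of that tuple. The
 * reference tag must equal the low 32 bits of the LBA for type 1
 * protection and the expected initial LBA (ei_lba) for type 2; type 3
 * carries no reference tag check.
 */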
3031 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3032 unsigned int sectors, bool read)
3036 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3037 scp->device->hostdata, true);
3038 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3039 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3040 struct sg_mapping_iter miter;
3042 /* Bytes of protection data to copy into sgl */
3043 resid = sectors * sizeof(*dif_storep);
3045 sg_miter_start(&miter, scsi_prot_sglist(scp),
3046 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3047 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3049 while (sg_miter_next(&miter) && resid > 0) {
3050 size_t len = min_t(size_t, miter.length, resid);
3051 void *start = dif_store(sip, sector);
3054 if (dif_store_end < start + len)
3055 rest = start + len - dif_store_end;
3060 memcpy(paddr, start, len - rest);
3062 memcpy(start, paddr, len - rest);
3066 memcpy(paddr + len - rest, dif_storep, rest);
3068 memcpy(dif_storep, paddr + len - rest, rest);
3071 sector += len / sizeof(*dif_storep);
3074 sg_miter_stop(&miter);
3077 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3078 unsigned int sectors, u32 ei_lba)
3083 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3084 scp->device->hostdata, true);
3085 struct t10_pi_tuple *sdt;
3087 for (i = 0; i < sectors; i++, ei_lba++) {
3088 sector = start_sec + i;
3089 sdt = dif_store(sip, sector);
3091 if (sdt->app_tag == cpu_to_be16(0xffff))
3095 * Because scsi_debug acts as both initiator and
3096 * target we proceed to verify the PI even if
3097 * RDPROTECT=3. This is done so the "initiator" knows
3098 * which type of error to return. Otherwise we would
3099 * have to iterate over the PI twice.
3101 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3102 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3111 dif_copy_prot(scp, start_sec, sectors, true);
3117 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3124 struct sdeb_store_info *sip = devip2sip(devip, true);
3125 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3126 u8 *cmd = scp->cmnd;
3131 lba = get_unaligned_be64(cmd + 2);
3132 num = get_unaligned_be32(cmd + 10);
3137 lba = get_unaligned_be32(cmd + 2);
3138 num = get_unaligned_be16(cmd + 7);
3143 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3144 (u32)(cmd[1] & 0x1f) << 16;
3145 num = (0 == cmd[4]) ? 256 : cmd[4];
3150 lba = get_unaligned_be32(cmd + 2);
3151 num = get_unaligned_be32(cmd + 6);
3154 case XDWRITEREAD_10:
3156 lba = get_unaligned_be32(cmd + 2);
3157 num = get_unaligned_be16(cmd + 7);
3160 default: /* assume READ(32) */
3161 lba = get_unaligned_be64(cmd + 12);
3162 ei_lba = get_unaligned_be32(cmd + 20);
3163 num = get_unaligned_be32(cmd + 28);
3167 if (unlikely(have_dif_prot && check_prot)) {
3168 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3170 mk_sense_invalid_opcode(scp);
3171 return check_condition_result;
3173 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3174 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3175 (cmd[1] & 0xe0) == 0)
3176 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3179 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3180 atomic_read(&sdeb_inject_pending))) {
3182 atomic_set(&sdeb_inject_pending, 0);
3185 ret = check_device_access_params(scp, lba, num, false);
3188 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3189 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3190 ((lba + num) > sdebug_medium_error_start))) {
3191 /* claim unrecoverable read error */
3192 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3193 /* set info field and valid bit for fixed descriptor */
3194 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3195 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3196 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3197 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3198 put_unaligned_be32(ret, scp->sense_buffer + 3);
3200 scsi_set_resid(scp, scsi_bufflen(scp));
3201 return check_condition_result;
3204 read_lock(macc_lckp);
3207 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3208 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3209 case 1: /* Guard tag error */
3210 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3211 read_unlock(macc_lckp);
3212 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3213 return check_condition_result;
3214 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3215 read_unlock(macc_lckp);
3216 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3217 return illegal_condition_result;
3220 case 3: /* Reference tag error */
3221 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3222 read_unlock(macc_lckp);
3223 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3224 return check_condition_result;
3225 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3226 read_unlock(macc_lckp);
3227 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3228 return illegal_condition_result;
3234 ret = do_device_access(sip, scp, 0, lba, num, false);
3235 read_unlock(macc_lckp);
3236 if (unlikely(ret == -1))
3237 return DID_ERROR << 16;
3239 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3241 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3242 atomic_read(&sdeb_inject_pending))) {
3243 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3244 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3245 atomic_set(&sdeb_inject_pending, 0);
3246 return check_condition_result;
3247 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3248 /* Logical block guard check failed */
3249 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3250 atomic_set(&sdeb_inject_pending, 0);
3251 return illegal_condition_result;
3252 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3253 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3254 atomic_set(&sdeb_inject_pending, 0);
3255 return illegal_condition_result;
3261 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3262 unsigned int sectors, u32 ei_lba)
3265 struct t10_pi_tuple *sdt;
3267 sector_t sector = start_sec;
3270 struct sg_mapping_iter diter;
3271 struct sg_mapping_iter piter;
3273 BUG_ON(scsi_sg_count(SCpnt) == 0);
3274 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3276 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3277 scsi_prot_sg_count(SCpnt),
3278 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3279 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3280 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3282 /* For each protection page */
3283 while (sg_miter_next(&piter)) {
3285 if (WARN_ON(!sg_miter_next(&diter))) {
3290 for (ppage_offset = 0; ppage_offset < piter.length;
3291 ppage_offset += sizeof(struct t10_pi_tuple)) {
3292 /* If we're at the end of the current
3293 * data page, advance to the next one
3295 if (dpage_offset >= diter.length) {
3296 if (WARN_ON(!sg_miter_next(&diter))) {
3303 sdt = piter.addr + ppage_offset;
3304 daddr = diter.addr + dpage_offset;
3306 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3307 ret = dif_verify(sdt, daddr, sector, ei_lba);
3314 dpage_offset += sdebug_sector_size;
3316 diter.consumed = dpage_offset;
3317 sg_miter_stop(&diter);
3319 sg_miter_stop(&piter);
3321 dif_copy_prot(SCpnt, start_sec, sectors, false);
3328 sg_miter_stop(&diter);
3329 sg_miter_stop(&piter);
3333 static unsigned long lba_to_map_index(sector_t lba)
3335 if (sdebug_unmap_alignment)
3336 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3337 sector_div(lba, sdebug_unmap_granularity);
3341 static sector_t map_index_to_lba(unsigned long index)
3343 sector_t lba = index * sdebug_unmap_granularity;
3345 if (sdebug_unmap_alignment)
3346 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
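/*
 * Worked example of the two mappings above with
 * sdebug_unmap_granularity=8 and sdebug_unmap_alignment=4:
 * lba_to_map_index() sends LBAs 0-3 to index 0 and LBAs 4-11 to
 * index 1, while map_index_to_lba(1) returns 4, the first LBA that
 * index 1 covers.
 */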
3350 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3354 unsigned int mapped;
3355 unsigned long index;
3358 index = lba_to_map_index(lba);
3359 mapped = test_bit(index, sip->map_storep);
3362 next = find_next_zero_bit(sip->map_storep, map_size, index);
3364 next = find_next_bit(sip->map_storep, map_size, index);
3366 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3371 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3374 sector_t end = lba + len;
3377 unsigned long index = lba_to_map_index(lba);
3379 if (index < map_size)
3380 set_bit(index, sip->map_storep);
3382 lba = map_index_to_lba(index + 1);
3386 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3389 sector_t end = lba + len;
3390 u8 *fsp = sip->storep;
3393 unsigned long index = lba_to_map_index(lba);
3395 if (lba == map_index_to_lba(index) &&
3396 lba + sdebug_unmap_granularity <= end &&
3398 clear_bit(index, sip->map_storep);
3399 if (sdebug_lbprz) { /* LBPRZ=1 fills with zeros, LBPRZ=2 with 0xff bytes */
3400 memset(fsp + lba * sdebug_sector_size,
3401 (sdebug_lbprz & 1) ? 0 : 0xff,
3402 sdebug_sector_size *
3403 sdebug_unmap_granularity);
3405 if (sip->dif_storep) {
3406 memset(sip->dif_storep + lba, 0xff,
3407 sizeof(*sip->dif_storep) *
3408 sdebug_unmap_granularity);
3411 lba = map_index_to_lba(index + 1);
3415 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3422 struct sdeb_store_info *sip = devip2sip(devip, true);
3423 rwlock_t *macc_lckp = &sip->macc_lck;
3424 u8 *cmd = scp->cmnd;
3429 lba = get_unaligned_be64(cmd + 2);
3430 num = get_unaligned_be32(cmd + 10);
3435 lba = get_unaligned_be32(cmd + 2);
3436 num = get_unaligned_be16(cmd + 7);
3441 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3442 (u32)(cmd[1] & 0x1f) << 16;
3443 num = (0 == cmd[4]) ? 256 : cmd[4];
3448 lba = get_unaligned_be32(cmd + 2);
3449 num = get_unaligned_be32(cmd + 6);
3452 case XDWRITEREAD_10: /* 0x53 */
3454 lba = get_unaligned_be32(cmd + 2);
3455 num = get_unaligned_be16(cmd + 7);
3458 default: /* assume WRITE(32) */
3459 lba = get_unaligned_be64(cmd + 12);
3460 ei_lba = get_unaligned_be32(cmd + 20);
3461 num = get_unaligned_be32(cmd + 28);
3465 if (unlikely(have_dif_prot && check_prot)) {
3466 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3468 mk_sense_invalid_opcode(scp);
3469 return check_condition_result;
3471 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3472 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3473 (cmd[1] & 0xe0) == 0)
3474 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3478 write_lock(macc_lckp);
3479 ret = check_device_access_params(scp, lba, num, true);
3481 write_unlock(macc_lckp);
3486 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3487 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3488 case 1: /* Guard tag error */
3489 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3490 write_unlock(macc_lckp);
3491 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3492 return illegal_condition_result;
3493 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3494 write_unlock(macc_lckp);
3495 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3496 return check_condition_result;
3499 case 3: /* Reference tag error */
3500 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3501 write_unlock(macc_lckp);
3502 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3503 return illegal_condition_result;
3504 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3505 write_unlock(macc_lckp);
3506 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3507 return check_condition_result;
3513 ret = do_device_access(sip, scp, 0, lba, num, true);
3514 if (unlikely(scsi_debug_lbp()))
3515 map_region(sip, lba, num);
3516 /* If ZBC zone then bump its write pointer */
3517 if (sdebug_dev_is_zoned(devip))
3518 zbc_inc_wp(devip, lba, num);
3519 write_unlock(macc_lckp);
3520 if (unlikely(-1 == ret))
3521 return DID_ERROR << 16;
3522 else if (unlikely(sdebug_verbose &&
3523 (ret < (num * sdebug_sector_size))))
3524 sdev_printk(KERN_INFO, scp->device,
3525 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3526 my_name, num * sdebug_sector_size, ret);
3528 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3529 atomic_read(&sdeb_inject_pending))) {
3530 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3531 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3532 atomic_set(&sdeb_inject_pending, 0);
3533 return check_condition_result;
3534 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3535 /* Logical block guard check failed */
3536 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3537 atomic_set(&sdeb_inject_pending, 0);
3538 return illegal_condition_result;
3539 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3540 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3541 atomic_set(&sdeb_inject_pending, 0);
3542 return illegal_condition_result;
3549 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3550 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3552 static int resp_write_scat(struct scsi_cmnd *scp,
3553 struct sdebug_dev_info *devip)
3555 u8 *cmd = scp->cmnd;
3558 struct sdeb_store_info *sip = devip2sip(devip, true);
3559 rwlock_t *macc_lckp = &sip->macc_lck;
3561 u16 lbdof, num_lrd, k;
3562 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3563 u32 lb_size = sdebug_sector_size;
3568 static const u32 lrd_size = 32; /* + parameter list header size */
3570 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3572 wrprotect = (cmd[10] >> 5) & 0x7;
3573 lbdof = get_unaligned_be16(cmd + 12);
3574 num_lrd = get_unaligned_be16(cmd + 16);
3575 bt_len = get_unaligned_be32(cmd + 28);
3576 } else { /* that leaves WRITE SCATTERED(16) */
3578 wrprotect = (cmd[2] >> 5) & 0x7;
3579 lbdof = get_unaligned_be16(cmd + 4);
3580 num_lrd = get_unaligned_be16(cmd + 8);
3581 bt_len = get_unaligned_be32(cmd + 10);
3582 if (unlikely(have_dif_prot)) {
3583 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3585 mk_sense_invalid_opcode(scp);
3586 return illegal_condition_result;
3588 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3589 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3591 sdev_printk(KERN_ERR, scp->device,
3592 "Unprotected WR to DIF device\n");
3595 if ((num_lrd == 0) || (bt_len == 0))
3596 return 0; /* T10 says these do-nothings are not errors */
3599 sdev_printk(KERN_INFO, scp->device,
3600 "%s: %s: LB Data Offset field bad\n",
3602 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3603 return illegal_condition_result;
3605 lbdof_blen = lbdof * lb_size;
3606 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3608 sdev_printk(KERN_INFO, scp->device,
3609 "%s: %s: LBA range descriptors don't fit\n",
3611 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3612 return illegal_condition_result;
3614 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3616 return SCSI_MLQUEUE_HOST_BUSY;
3618 sdev_printk(KERN_INFO, scp->device,
3619 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3620 my_name, __func__, lbdof_blen);
3621 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3623 ret = DID_ERROR << 16;
3627 write_lock(macc_lckp);
3628 sg_off = lbdof_blen;
3629 /* The spec gives the Buffer Transfer Length field in logical blocks of data-out */
3631 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3632 lba = get_unaligned_be64(up + 0);
3633 num = get_unaligned_be32(up + 8);
3635 sdev_printk(KERN_INFO, scp->device,
3636 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3637 my_name, __func__, k, lba, num, sg_off);
3640 ret = check_device_access_params(scp, lba, num, true);
3642 goto err_out_unlock;
3643 num_by = num * lb_size;
3644 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3646 if ((cum_lb + num) > bt_len) {
3648 sdev_printk(KERN_INFO, scp->device,
3649 "%s: %s: sum of blocks > data provided\n",
3651 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3653 ret = illegal_condition_result;
3654 goto err_out_unlock;
3658 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3659 int prot_ret = prot_verify_write(scp, lba, num,
3663 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3665 ret = illegal_condition_result;
3666 goto err_out_unlock;
3670 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3671 /* If ZBC zone then bump its write pointer */
3672 if (sdebug_dev_is_zoned(devip))
3673 zbc_inc_wp(devip, lba, num);
3674 if (unlikely(scsi_debug_lbp()))
3675 map_region(sip, lba, num);
3676 if (unlikely(-1 == ret)) {
3677 ret = DID_ERROR << 16;
3678 goto err_out_unlock;
3679 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3680 sdev_printk(KERN_INFO, scp->device,
3681 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3682 my_name, num_by, ret);
3684 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3685 atomic_read(&sdeb_inject_pending))) {
3686 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3687 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3688 atomic_set(&sdeb_inject_pending, 0);
3689 ret = check_condition_result;
3690 goto err_out_unlock;
3691 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3692 /* Logical block guard check failed */
3693 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3694 atomic_set(&sdeb_inject_pending, 0);
3695 ret = illegal_condition_result;
3696 goto err_out_unlock;
3697 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3698 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3699 atomic_set(&sdeb_inject_pending, 0);
3700 ret = illegal_condition_result;
3701 goto err_out_unlock;
3709 write_unlock(macc_lckp);
3715 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3716 u32 ei_lba, bool unmap, bool ndob)
3718 struct scsi_device *sdp = scp->device;
3719 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3720 unsigned long long i;
3722 u32 lb_size = sdebug_sector_size;
3724 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3725 scp->device->hostdata, true);
3726 rwlock_t *macc_lckp = &sip->macc_lck;
3730 write_lock(macc_lckp);
3732 ret = check_device_access_params(scp, lba, num, true);
3734 write_unlock(macc_lckp);
3738 if (unmap && scsi_debug_lbp()) {
3739 unmap_region(sip, lba, num);
3743 block = do_div(lbaa, sdebug_store_sectors);
3744 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3746 fs1p = fsp + (block * lb_size);
3748 memset(fs1p, 0, lb_size);
3751 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3754 write_unlock(macc_lckp);
3755 return DID_ERROR << 16;
3756 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3757 sdev_printk(KERN_INFO, scp->device,
3758 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3759 my_name, "write same", lb_size, ret);
3761 /* Copy first sector to remaining blocks */
3762 for (i = 1 ; i < num ; i++) {
3764 block = do_div(lbaa, sdebug_store_sectors);
3765 memmove(fsp + (block * lb_size), fs1p, lb_size);
3767 if (scsi_debug_lbp())
3768 map_region(sip, lba, num);
3769 /* If ZBC zone then bump its write pointer */
3770 if (sdebug_dev_is_zoned(devip))
3771 zbc_inc_wp(devip, lba, num);
3773 write_unlock(macc_lckp);
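/*
 * memmove() rather than memcpy() above: when the copy loop wraps past
 * sdebug_store_sectors, the destination block can coincide with the
 * source block fs1p, which memcpy() with overlapping regions would not
 * tolerate.
 */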
3778 static int resp_write_same_10(struct scsi_cmnd *scp,
3779 struct sdebug_dev_info *devip)
3781 u8 *cmd = scp->cmnd;
3788 if (sdebug_lbpws10 == 0) {
3789 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3790 return check_condition_result;
3794 lba = get_unaligned_be32(cmd + 2);
3795 num = get_unaligned_be16(cmd + 7);
3796 if (num > sdebug_write_same_length) {
3797 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3798 return check_condition_result;
3800 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3803 static int resp_write_same_16(struct scsi_cmnd *scp,
3804 struct sdebug_dev_info *devip)
3806 u8 *cmd = scp->cmnd;
3813 if (cmd[1] & 0x8) { /* UNMAP */
3814 if (sdebug_lbpws == 0) {
3815 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3816 return check_condition_result;
3820 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3822 lba = get_unaligned_be64(cmd + 2);
3823 num = get_unaligned_be32(cmd + 10);
3824 if (num > sdebug_write_same_length) {
3825 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3826 return check_condition_result;
3828 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
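/*
 * CDB decode for the function above: WRITE SAME(16) is opcode 0x93;
 * cmd[1] bit 3 is UNMAP and bit 0 is NDOB (no data-out buffer: write
 * zeroes without fetching a block), cmd[2..9] the 64-bit LBA and
 * cmd[10..13] the 32-bit number of blocks.
 */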
3831 /* Note the mode field is in the same position as the (lower) service action
3832 * field. For the Report supported operation codes command, SPC-4 suggests
3833 * each mode of this command should be reported separately; that is left for the future. */
3834 static int resp_write_buffer(struct scsi_cmnd *scp,
3835 struct sdebug_dev_info *devip)
3837 u8 *cmd = scp->cmnd;
3838 struct scsi_device *sdp = scp->device;
3839 struct sdebug_dev_info *dp;
3842 mode = cmd[1] & 0x1f;
3844 case 0x4: /* download microcode (MC) and activate (ACT) */
3845 /* set UAs on this device only */
3846 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3847 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3849 case 0x5: /* download MC, save and ACT */
3850 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3852 case 0x6: /* download MC with offsets and ACT */
3853 /* set UAs on most devices (LUs) in this target */
3854 list_for_each_entry(dp,
3855 &devip->sdbg_host->dev_info_list,
3857 if (dp->target == sdp->id) {
3858 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3860 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3864 case 0x7: /* download MC with offsets, save, and ACT */
3865 /* set UA on all devices (LUs) in this target */
3866 list_for_each_entry(dp,
3867 &devip->sdbg_host->dev_info_list,
3869 if (dp->target == sdp->id)
3870 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3874 /* do nothing for this command for other mode values */
3880 static int resp_comp_write(struct scsi_cmnd *scp,
3881 struct sdebug_dev_info *devip)
3883 u8 *cmd = scp->cmnd;
3885 struct sdeb_store_info *sip = devip2sip(devip, true);
3886 rwlock_t *macc_lckp = &sip->macc_lck;
3889 u32 lb_size = sdebug_sector_size;
3894 lba = get_unaligned_be64(cmd + 2);
3895 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3897 return 0; /* degenerate case, not an error */
3898 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3900 mk_sense_invalid_opcode(scp);
3901 return check_condition_result;
3903 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3904 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3905 (cmd[1] & 0xe0) == 0)
3906 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3908 ret = check_device_access_params(scp, lba, num, false);
3912 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3914 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3916 return check_condition_result;
3919 write_lock(macc_lckp);
3921 ret = do_dout_fetch(scp, dnum, arr);
3923 retval = DID_ERROR << 16;
3925 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3926 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3927 "indicated=%u, IO sent=%d bytes\n", my_name,
3928 dnum * lb_size, ret);
3929 if (!comp_write_worker(sip, lba, num, arr, false)) {
3930 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3931 retval = check_condition_result;
3934 if (scsi_debug_lbp())
3935 map_region(sip, lba, num);
3937 write_unlock(macc_lckp);
3942 struct unmap_block_desc {
3948 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3951 struct unmap_block_desc *desc;
3952 struct sdeb_store_info *sip = devip2sip(devip, true);
3953 rwlock_t *macc_lckp = &sip->macc_lck;
3954 unsigned int i, payload_len, descriptors;
3957 if (!scsi_debug_lbp())
3958 return 0; /* fib and say it's done */
3959 payload_len = get_unaligned_be16(scp->cmnd + 7);
3960 BUG_ON(scsi_bufflen(scp) != payload_len);
3962 descriptors = (payload_len - 8) / 16;
3963 if (descriptors > sdebug_unmap_max_desc) {
3964 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3965 return check_condition_result;
3968 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3970 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3972 return check_condition_result;
3975 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3977 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3978 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3980 desc = (void *)&buf[8];
3982 write_lock(macc_lckp);
3984 for (i = 0 ; i < descriptors ; i++) {
3985 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3986 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3988 ret = check_device_access_params(scp, lba, num, true);
3992 unmap_region(sip, lba, num);
3998 write_unlock(macc_lckp);
4004 #define SDEBUG_GET_LBA_STATUS_LEN 32
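/* Response layout (derived from the buffer fills below): an 8 byte header
 * (parameter data length in the first 4 bytes) followed by one 16 byte LBA
 * status descriptor (LBA, block count and PROV STATUS); the remainder of
 * the fixed 32 byte buffer stays zeroed.
 */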
4006 static int resp_get_lba_status(struct scsi_cmnd *scp,
4007 struct sdebug_dev_info *devip)
4009 u8 *cmd = scp->cmnd;
4011 u32 alloc_len, mapped, num;
4013 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4015 lba = get_unaligned_be64(cmd + 2);
4016 alloc_len = get_unaligned_be32(cmd + 10);
4021 ret = check_device_access_params(scp, lba, 1, false);
4025 if (scsi_debug_lbp()) {
4026 struct sdeb_store_info *sip = devip2sip(devip, true);
4028 mapped = map_state(sip, lba, &num);
4031 /* following just in case virtual_gb changed */
4032 sdebug_capacity = get_sdebug_capacity();
4033 if (sdebug_capacity - lba <= 0xffffffff)
4034 num = sdebug_capacity - lba;
4039 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4040 put_unaligned_be32(20, arr); /* Parameter Data Length */
4041 put_unaligned_be64(lba, arr + 8); /* LBA */
4042 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4043 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4045 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4048 static int resp_sync_cache(struct scsi_cmnd *scp,
4049 struct sdebug_dev_info *devip)
4054 u8 *cmd = scp->cmnd;
4056 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4057 lba = get_unaligned_be32(cmd + 2);
4058 num_blocks = get_unaligned_be16(cmd + 7);
4059 } else { /* SYNCHRONIZE_CACHE(16) */
4060 lba = get_unaligned_be64(cmd + 2);
4061 num_blocks = get_unaligned_be32(cmd + 10);
4063 if (lba + num_blocks > sdebug_capacity) {
4064 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4065 return check_condition_result;
4067 if (!write_since_sync || (cmd[1] & 0x2))
4068 res = SDEG_RES_IMMED_MASK;
4069 else /* delay if write_since_sync and IMMED clear */
4070 write_since_sync = false;
4075 * Assuming the LBA+num_blocks is not out-of-range, this function returns
4076 * CONDITION MET if the specified blocks will fit (or already sit) in the
4077 * cache, and GOOD status otherwise. We model a disk with a big cache, so
4078 * always yield CONDITION MET. It actually tries to bring the range of main
4079 * memory into the cache associated with the CPU(s).
4081 static int resp_pre_fetch(struct scsi_cmnd *scp,
4082 struct sdebug_dev_info *devip)
4086 u64 block, rest = 0;
4088 u8 *cmd = scp->cmnd;
4089 struct sdeb_store_info *sip = devip2sip(devip, true);
4090 rwlock_t *macc_lckp = &sip->macc_lck;
4091 u8 *fsp = sip->storep;
4093 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4094 lba = get_unaligned_be32(cmd + 2);
4095 nblks = get_unaligned_be16(cmd + 7);
4096 } else { /* PRE-FETCH(16) */
4097 lba = get_unaligned_be64(cmd + 2);
4098 nblks = get_unaligned_be32(cmd + 10);
4100 if (lba + nblks > sdebug_capacity) {
4101 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4102 return check_condition_result;
4106 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4107 block = do_div(lba, sdebug_store_sectors);
4108 if (block + nblks > sdebug_store_sectors)
4109 rest = block + nblks - sdebug_store_sectors;
4111 /* Try to bring the PRE-FETCH range into CPU's cache */
4112 read_lock(macc_lckp);
4113 prefetch_range(fsp + (sdebug_sector_size * block),
4114 (nblks - rest) * sdebug_sector_size);
4116 prefetch_range(fsp, rest * sdebug_sector_size);
4117 read_unlock(macc_lckp);
4120 res = SDEG_RES_IMMED_MASK;
4121 return res | condition_met_result;
4124 #define RL_BUCKET_ELEMS 8
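/* REPORT LUNS responses are staged through a 64 byte on-stack array
 * (RL_BUCKET_ELEMS entries of sizeof(struct scsi_lun), i.e. 8 * 8 bytes)
 * and copied out one bucket at a time via p_fill_from_dev_buffer().
 */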
4126 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4127 * (W-LUN), the normal Linux scanning logic does not associate it with a
4128 * device (e.g. /dev/sg7). The following magic will make that association:
4129 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4130 * where <n> is a host number. If there are multiple targets in a host then
4131 * the above will associate a W-LUN with each target. To get a W-LUN for
4132 * target 2 only, use "echo '- 2 49409' > scan".
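 * (49409 is 0xc101, i.e. SCSI_W_LUN_REPORT_LUNS, the well known logical
 * unit that answers REPORT LUNS.)
 */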
4134 static int resp_report_luns(struct scsi_cmnd *scp,
4135 struct sdebug_dev_info *devip)
4137 unsigned char *cmd = scp->cmnd;
4138 unsigned int alloc_len;
4139 unsigned char select_report;
4141 struct scsi_lun *lun_p;
4142 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4143 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4144 unsigned int wlun_cnt; /* report luns W-LUN count */
4145 unsigned int tlun_cnt; /* total LUN count */
4146 unsigned int rlen; /* response length (in bytes) */
4148 unsigned int off_rsp = 0;
4149 const int sz_lun = sizeof(struct scsi_lun);
4151 clear_luns_changed_on_target(devip);
4153 select_report = cmd[2];
4154 alloc_len = get_unaligned_be32(cmd + 6);
4156 if (alloc_len < 4) {
4157 pr_err("alloc len too small %d\n", alloc_len);
4158 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4159 return check_condition_result;
4162 switch (select_report) {
4163 case 0: /* all LUNs apart from W-LUNs */
4164 lun_cnt = sdebug_max_luns;
4167 case 1: /* only W-LUNs */
4171 case 2: /* all LUNs */
4172 lun_cnt = sdebug_max_luns;
4175 case 0x10: /* only administrative LUs */
4176 case 0x11: /* see SPC-5 */
4177 case 0x12: /* only subsidiary LUs owned by referenced LU */
4179 pr_debug("select report invalid %d\n", select_report);
4180 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4181 return check_condition_result;
4184 if (sdebug_no_lun_0 && (lun_cnt > 0))
4187 tlun_cnt = lun_cnt + wlun_cnt;
4188 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4189 scsi_set_resid(scp, scsi_bufflen(scp));
4190 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4191 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4193 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4194 lun = sdebug_no_lun_0 ? 1 : 0;
4195 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4196 memset(arr, 0, sizeof(arr));
4197 lun_p = (struct scsi_lun *)&arr[0];
4199 put_unaligned_be32(rlen, &arr[0]);
4203 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4204 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4206 int_to_scsilun(lun++, lun_p);
4207 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4208 lun_p->scsi_lun[0] |= 0x40;
4210 if (j < RL_BUCKET_ELEMS)
4213 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4219 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4223 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4227 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4229 bool is_bytchk3 = false;
4232 u32 vnum, a_num, off;
4233 const u32 lb_size = sdebug_sector_size;
4236 u8 *cmd = scp->cmnd;
4237 struct sdeb_store_info *sip = devip2sip(devip, true);
4238 rwlock_t *macc_lckp = &sip->macc_lck;
4240 bytchk = (cmd[1] >> 1) & 0x3;
4242 return 0; /* always claim internal verify okay */
4243 } else if (bytchk == 2) {
4244 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4245 return check_condition_result;
4246 } else if (bytchk == 3) {
4247 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4251 lba = get_unaligned_be64(cmd + 2);
4252 vnum = get_unaligned_be32(cmd + 10);
4254 case VERIFY: /* is VERIFY(10) */
4255 lba = get_unaligned_be32(cmd + 2);
4256 vnum = get_unaligned_be16(cmd + 7);
4259 mk_sense_invalid_opcode(scp);
4260 return check_condition_result;
4262 a_num = is_bytchk3 ? 1 : vnum;
4263 /* Treat following check like one for read (i.e. no write) access */
4264 ret = check_device_access_params(scp, lba, a_num, false);
4268 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4270 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4272 return check_condition_result;
4274 /* Not changing store, so only need read access */
4275 read_lock(macc_lckp);
4277 ret = do_dout_fetch(scp, a_num, arr);
4279 ret = DID_ERROR << 16;
4281 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4282 sdev_printk(KERN_INFO, scp->device,
4283 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4284 my_name, __func__, a_num * lb_size, ret);
4287 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4288 memcpy(arr + off, arr, lb_size);
4291 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4292 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4293 ret = check_condition_result;
4297 read_unlock(macc_lckp);
4302 #define RZONES_DESC_HD 64
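/* Both the REPORT ZONES response header and each zone descriptor are
 * 64 bytes long, so one constant serves for sizing both below.
 */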
4304 /* Report zones depending on start LBA and reporting options */
4305 static int resp_report_zones(struct scsi_cmnd *scp,
4306 struct sdebug_dev_info *devip)
4308 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4310 u32 alloc_len, rep_opts, rep_len;
4313 u8 *arr = NULL, *desc;
4314 u8 *cmd = scp->cmnd;
4315 struct sdeb_zone_state *zsp;
4316 struct sdeb_store_info *sip = devip2sip(devip, false);
4317 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4319 if (!sdebug_dev_is_zoned(devip)) {
4320 mk_sense_invalid_opcode(scp);
4321 return check_condition_result;
4323 zs_lba = get_unaligned_be64(cmd + 2);
4324 alloc_len = get_unaligned_be32(cmd + 10);
4325 rep_opts = cmd[14] & 0x3f;
4326 partial = cmd[14] & 0x80;
4328 if (zs_lba >= sdebug_capacity) {
4329 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4330 return check_condition_result;
4333 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4334 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4337 arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4339 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4341 return check_condition_result;
4344 read_lock(macc_lckp);
4347 for (i = 0; i < max_zones; i++) {
4348 lba = zs_lba + devip->zsize * i;
4349 if (lba > sdebug_capacity)
4351 zsp = zbc_zone(devip, lba);
4358 if (zsp->z_cond != ZC1_EMPTY)
4362 /* Implicit open zones */
4363 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4367 /* Explicit open zones */
4368 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4373 if (zsp->z_cond != ZC4_CLOSED)
4378 if (zsp->z_cond != ZC5_FULL)
4385 * Read-only, offline and reset-WP-recommended zones are
4386 * not emulated: no zones to report;
4390 /* non-seq-resource set */
4391 if (!zsp->z_non_seq_resource)
4395 /* Not write pointer (conventional) zones */
4396 if (!zbc_zone_is_conv(zsp))
4400 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4401 INVALID_FIELD_IN_CDB, 0);
4402 ret = check_condition_result;
4406 if (nrz < rep_max_zones) {
4407 /* Fill zone descriptor */
4408 desc[0] = zsp->z_type;
4409 desc[1] = zsp->z_cond << 4;
4410 if (zsp->z_non_seq_resource)
4412 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4413 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4414 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4418 if (partial && nrz >= rep_max_zones)
4425 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4426 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4428 rep_len = (unsigned long)desc - (unsigned long)arr;
4429 ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4432 read_unlock(macc_lckp);
4437 /* Logic transplanted from tcmu-runner, file_zbc.c */
4438 static void zbc_open_all(struct sdebug_dev_info *devip)
4440 struct sdeb_zone_state *zsp = &devip->zstate[0];
4443 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4444 if (zsp->z_cond == ZC4_CLOSED)
4445 zbc_open_zone(devip, &devip->zstate[i], true);
4449 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4453 enum sdebug_z_cond zc;
4454 u8 *cmd = scp->cmnd;
4455 struct sdeb_zone_state *zsp;
4456 bool all = cmd[14] & 0x01;
4457 struct sdeb_store_info *sip = devip2sip(devip, false);
4458 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4460 if (!sdebug_dev_is_zoned(devip)) {
4461 mk_sense_invalid_opcode(scp);
4462 return check_condition_result;
4465 write_lock(macc_lckp);
4468 /* Check if all closed zones can be opened */
4469 if (devip->max_open &&
4470 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4471 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4473 res = check_condition_result;
4476 /* Open all closed zones */
4477 zbc_open_all(devip);
4481 /* Open the specified zone */
4482 z_id = get_unaligned_be64(cmd + 2);
4483 if (z_id >= sdebug_capacity) {
4484 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4485 res = check_condition_result;
4489 zsp = zbc_zone(devip, z_id);
4490 if (z_id != zsp->z_start) {
4491 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4492 res = check_condition_result;
4495 if (zbc_zone_is_conv(zsp)) {
4496 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4497 res = check_condition_result;
4502 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4505 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4506 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4508 res = check_condition_result;
4512 zbc_open_zone(devip, zsp, true);
4514 write_unlock(macc_lckp);
4518 static void zbc_close_all(struct sdebug_dev_info *devip)
4522 for (i = 0; i < devip->nr_zones; i++)
4523 zbc_close_zone(devip, &devip->zstate[i]);
4526 static int resp_close_zone(struct scsi_cmnd *scp,
4527 struct sdebug_dev_info *devip)
4531 u8 *cmd = scp->cmnd;
4532 struct sdeb_zone_state *zsp;
4533 bool all = cmd[14] & 0x01;
4534 struct sdeb_store_info *sip = devip2sip(devip, false);
4535 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4537 if (!sdebug_dev_is_zoned(devip)) {
4538 mk_sense_invalid_opcode(scp);
4539 return check_condition_result;
4542 write_lock(macc_lckp);
4545 zbc_close_all(devip);
4549 /* Close specified zone */
4550 z_id = get_unaligned_be64(cmd + 2);
4551 if (z_id >= sdebug_capacity) {
4552 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4553 res = check_condition_result;
4557 zsp = zbc_zone(devip, z_id);
4558 if (z_id != zsp->z_start) {
4559 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4560 res = check_condition_result;
4563 if (zbc_zone_is_conv(zsp)) {
4564 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4565 res = check_condition_result;
4569 zbc_close_zone(devip, zsp);
4571 write_unlock(macc_lckp);
4575 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4576 struct sdeb_zone_state *zsp, bool empty)
4578 enum sdebug_z_cond zc = zsp->z_cond;
4580 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4581 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4582 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4583 zbc_close_zone(devip, zsp);
4584 if (zsp->z_cond == ZC4_CLOSED)
4586 zsp->z_wp = zsp->z_start + zsp->z_size;
4587 zsp->z_cond = ZC5_FULL;
4591 static void zbc_finish_all(struct sdebug_dev_info *devip)
4595 for (i = 0; i < devip->nr_zones; i++)
4596 zbc_finish_zone(devip, &devip->zstate[i], false);
4599 static int resp_finish_zone(struct scsi_cmnd *scp,
4600 struct sdebug_dev_info *devip)
4602 struct sdeb_zone_state *zsp;
4605 u8 *cmd = scp->cmnd;
4606 bool all = cmd[14] & 0x01;
4607 struct sdeb_store_info *sip = devip2sip(devip, false);
4608 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4610 if (!sdebug_dev_is_zoned(devip)) {
4611 mk_sense_invalid_opcode(scp);
4612 return check_condition_result;
4615 write_lock(macc_lckp);
4618 zbc_finish_all(devip);
4622 /* Finish the specified zone */
4623 z_id = get_unaligned_be64(cmd + 2);
4624 if (z_id >= sdebug_capacity) {
4625 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4626 res = check_condition_result;
4630 zsp = zbc_zone(devip, z_id);
4631 if (z_id != zsp->z_start) {
4632 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4633 res = check_condition_result;
4636 if (zbc_zone_is_conv(zsp)) {
4637 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4638 res = check_condition_result;
4642 zbc_finish_zone(devip, zsp, true);
4644 write_unlock(macc_lckp);
4648 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4649 struct sdeb_zone_state *zsp)
4651 enum sdebug_z_cond zc;
4653 if (zbc_zone_is_conv(zsp))
4657 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4658 zbc_close_zone(devip, zsp);
4660 if (zsp->z_cond == ZC4_CLOSED)
4663 zsp->z_non_seq_resource = false;
4664 zsp->z_wp = zsp->z_start;
4665 zsp->z_cond = ZC1_EMPTY;
4668 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4672 for (i = 0; i < devip->nr_zones; i++)
4673 zbc_rwp_zone(devip, &devip->zstate[i]);
4676 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4678 struct sdeb_zone_state *zsp;
4681 u8 *cmd = scp->cmnd;
4682 bool all = cmd[14] & 0x01;
4683 struct sdeb_store_info *sip = devip2sip(devip, false);
4684 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4686 if (!sdebug_dev_is_zoned(devip)) {
4687 mk_sense_invalid_opcode(scp);
4688 return check_condition_result;
4691 write_lock(macc_lckp);
4698 z_id = get_unaligned_be64(cmd + 2);
4699 if (z_id >= sdebug_capacity) {
4700 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4701 res = check_condition_result;
4705 zsp = zbc_zone(devip, z_id);
4706 if (z_id != zsp->z_start) {
4707 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4708 res = check_condition_result;
4711 if (zbc_zone_is_conv(zsp)) {
4712 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4713 res = check_condition_result;
4717 zbc_rwp_zone(devip, zsp);
4719 write_unlock(macc_lckp);
4723 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4726 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4728 hwq = blk_mq_unique_tag_to_hwq(tag);
4730 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4731 if (WARN_ON_ONCE(hwq >= submit_queues))
4734 return sdebug_q_arr + hwq;
4737 static u32 get_tag(struct scsi_cmnd *cmnd)
4739 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4742 /* Queued (deferred) command completions converge here. */
4743 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4745 bool aborted = sd_dp->aborted;
4748 unsigned long iflags;
4749 struct sdebug_queue *sqp;
4750 struct sdebug_queued_cmd *sqcp;
4751 struct scsi_cmnd *scp;
4752 struct sdebug_dev_info *devip;
4754 if (unlikely(aborted))
4755 sd_dp->aborted = false;
4756 qc_idx = sd_dp->qc_idx;
4757 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4758 if (sdebug_statistics) {
4759 atomic_inc(&sdebug_completions);
4760 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4761 atomic_inc(&sdebug_miss_cpus);
4763 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4764 pr_err("wild qc_idx=%d\n", qc_idx);
4767 spin_lock_irqsave(&sqp->qc_lock, iflags);
4768 sd_dp->defer_t = SDEB_DEFER_NONE;
4769 sqcp = &sqp->qc_arr[qc_idx];
4771 if (unlikely(scp == NULL)) {
4772 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4773 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4774 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4777 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4779 atomic_dec(&devip->num_in_q);
4781 pr_err("devip=NULL\n");
4782 if (unlikely(atomic_read(&retired_max_queue) > 0))
4785 sqcp->a_cmnd = NULL;
4786 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4787 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4788 pr_err("Unexpected completion\n");
4792 if (unlikely(retiring)) { /* user has reduced max_queue */
4795 retval = atomic_read(&retired_max_queue);
4796 if (qc_idx >= retval) {
4797 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4798 pr_err("index %d too large\n", retval);
4801 k = find_last_bit(sqp->in_use_bm, retval);
4802 if ((k < sdebug_max_queue) || (k == retval))
4803 atomic_set(&retired_max_queue, 0);
4805 atomic_set(&retired_max_queue, k + 1);
4807 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4808 if (unlikely(aborted)) {
4810 pr_info("bypassing scsi_done() due to aborted cmd\n");
4813 scsi_done(scp); /* callback to mid level */
4816 /* When the high-resolution timer fires, this function is called. */
4817 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4819 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4821 sdebug_q_cmd_complete(sd_dp);
4822 return HRTIMER_NORESTART;
4825 /* When the work queue runs the scheduled work, it calls this function. */
4826 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4828 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4830 sdebug_q_cmd_complete(sd_dp);
4833 static bool got_shared_uuid;
4834 static uuid_t shared_uuid;
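/* These two statics back uuid_ctl=2: the first device created generates
 * shared_uuid and every later device reuses it as its LU name (see
 * sdebug_device_create() below).
 */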
4836 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4838 struct sdeb_zone_state *zsp;
4839 sector_t capacity = get_sdebug_capacity();
4840 sector_t zstart = 0;
4844 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4845 * a zone size allowing for at least 4 zones on the device. Otherwise,
4846 * use the specified zone size, checking that at least 2 zones can be
4847 * created for the device.
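 * In the default case the zone size is halved until at least 4 zones fit,
 * failing if it would drop below 2 sectors.
 */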
4849 if (!sdeb_zbc_zone_size_mb) {
4850 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4851 >> ilog2(sdebug_sector_size);
4852 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4854 if (devip->zsize < 2) {
4855 pr_err("Device capacity too small\n");
4859 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4860 pr_err("Zone size is not a power of 2\n");
4863 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4864 >> ilog2(sdebug_sector_size);
4865 if (devip->zsize >= capacity) {
4866 pr_err("Zone size too large for device capacity\n");
4871 devip->zsize_shift = ilog2(devip->zsize);
4872 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4874 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4875 pr_err("Number of conventional zones too large\n");
4878 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4880 if (devip->zmodel == BLK_ZONED_HM) {
4881 /* zbc_max_open_zones can be 0, meaning "not reported" */
4882 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4883 devip->max_open = (devip->nr_zones - 1) / 2;
4885 devip->max_open = sdeb_zbc_max_open;
4888 devip->zstate = kcalloc(devip->nr_zones,
4889 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4893 for (i = 0; i < devip->nr_zones; i++) {
4894 zsp = &devip->zstate[i];
4896 zsp->z_start = zstart;
4898 if (i < devip->nr_conv_zones) {
4899 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4900 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4901 zsp->z_wp = (sector_t)-1;
4903 if (devip->zmodel == BLK_ZONED_HM)
4904 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4906 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4907 zsp->z_cond = ZC1_EMPTY;
4908 zsp->z_wp = zsp->z_start;
4911 if (zsp->z_start + devip->zsize < capacity)
4912 zsp->z_size = devip->zsize;
4914 zsp->z_size = capacity - zsp->z_start;
4916 zstart += zsp->z_size;
4922 static struct sdebug_dev_info *sdebug_device_create(
4923 struct sdebug_host_info *sdbg_host, gfp_t flags)
4925 struct sdebug_dev_info *devip;
4927 devip = kzalloc(sizeof(*devip), flags);
4929 if (sdebug_uuid_ctl == 1)
4930 uuid_gen(&devip->lu_name);
4931 else if (sdebug_uuid_ctl == 2) {
4932 if (got_shared_uuid)
4933 devip->lu_name = shared_uuid;
4935 uuid_gen(&shared_uuid);
4936 got_shared_uuid = true;
4937 devip->lu_name = shared_uuid;
4940 devip->sdbg_host = sdbg_host;
4941 if (sdeb_zbc_in_use) {
4942 devip->zmodel = sdeb_zbc_model;
4943 if (sdebug_device_create_zones(devip)) {
4948 devip->zmodel = BLK_ZONED_NONE;
4950 devip->sdbg_host = sdbg_host;
4951 devip->create_ts = ktime_get_boottime();
4952 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4953 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4958 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4960 struct sdebug_host_info *sdbg_host;
4961 struct sdebug_dev_info *open_devip = NULL;
4962 struct sdebug_dev_info *devip;
4964 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4966 pr_err("Host info NULL\n");
4970 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4971 if ((devip->used) && (devip->channel == sdev->channel) &&
4972 (devip->target == sdev->id) &&
4973 (devip->lun == sdev->lun))
4976 if ((!devip->used) && (!open_devip))
4980 if (!open_devip) { /* try and make a new one */
4981 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4983 pr_err("out of memory at line %d\n", __LINE__);
4988 open_devip->channel = sdev->channel;
4989 open_devip->target = sdev->id;
4990 open_devip->lun = sdev->lun;
4991 open_devip->sdbg_host = sdbg_host;
4992 atomic_set(&open_devip->num_in_q, 0);
4993 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4994 open_devip->used = true;
4998 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5001 pr_info("slave_alloc <%u %u %u %llu>\n",
5002 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5006 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5008 struct sdebug_dev_info *devip =
5009 (struct sdebug_dev_info *)sdp->hostdata;
5012 pr_info("slave_configure <%u %u %u %llu>\n",
5013 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5014 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5015 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5016 if (devip == NULL) {
5017 devip = find_build_dev_info(sdp);
5019 return 1; /* no resources, will be marked offline */
5021 sdp->hostdata = devip;
5023 sdp->no_uld_attach = 1;
5024 config_cdb_len(sdp);
5028 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5030 struct sdebug_dev_info *devip =
5031 (struct sdebug_dev_info *)sdp->hostdata;
5034 pr_info("slave_destroy <%u %u %u %llu>\n",
5035 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5037 /* make this slot available for re-use */
5038 devip->used = false;
5039 sdp->hostdata = NULL;
5043 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5044 enum sdeb_defer_type defer_t)
5048 if (defer_t == SDEB_DEFER_HRT)
5049 hrtimer_cancel(&sd_dp->hrt);
5050 else if (defer_t == SDEB_DEFER_WQ)
5051 cancel_work_sync(&sd_dp->ew.work);
5054 /* If @cmnd is found, deletes its timer or work queue and returns true; else
5056 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5058 unsigned long iflags;
5059 int j, k, qmax, r_qmax;
5060 enum sdeb_defer_type l_defer_t;
5061 struct sdebug_queue *sqp;
5062 struct sdebug_queued_cmd *sqcp;
5063 struct sdebug_dev_info *devip;
5064 struct sdebug_defer *sd_dp;
5066 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5067 spin_lock_irqsave(&sqp->qc_lock, iflags);
5068 qmax = sdebug_max_queue;
5069 r_qmax = atomic_read(&retired_max_queue);
5072 for (k = 0; k < qmax; ++k) {
5073 if (test_bit(k, sqp->in_use_bm)) {
5074 sqcp = &sqp->qc_arr[k];
5075 if (cmnd != sqcp->a_cmnd)
5078 devip = (struct sdebug_dev_info *)
5079 cmnd->device->hostdata;
5081 atomic_dec(&devip->num_in_q);
5082 sqcp->a_cmnd = NULL;
5083 sd_dp = sqcp->sd_dp;
5085 l_defer_t = sd_dp->defer_t;
5086 sd_dp->defer_t = SDEB_DEFER_NONE;
5088 l_defer_t = SDEB_DEFER_NONE;
5089 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5090 stop_qc_helper(sd_dp, l_defer_t);
5091 clear_bit(k, sqp->in_use_bm);
5095 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5100 /* Deletes (stops) timers or work queues of all queued commands */
5101 static void stop_all_queued(void)
5103 unsigned long iflags;
5105 enum sdeb_defer_type l_defer_t;
5106 struct sdebug_queue *sqp;
5107 struct sdebug_queued_cmd *sqcp;
5108 struct sdebug_dev_info *devip;
5109 struct sdebug_defer *sd_dp;
5111 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5112 spin_lock_irqsave(&sqp->qc_lock, iflags);
5113 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5114 if (test_bit(k, sqp->in_use_bm)) {
5115 sqcp = &sqp->qc_arr[k];
5116 if (sqcp->a_cmnd == NULL)
5118 devip = (struct sdebug_dev_info *)
5119 sqcp->a_cmnd->device->hostdata;
5121 atomic_dec(&devip->num_in_q);
5122 sqcp->a_cmnd = NULL;
5123 sd_dp = sqcp->sd_dp;
5125 l_defer_t = sd_dp->defer_t;
5126 sd_dp->defer_t = SDEB_DEFER_NONE;
5128 l_defer_t = SDEB_DEFER_NONE;
5129 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5130 stop_qc_helper(sd_dp, l_defer_t);
5131 clear_bit(k, sqp->in_use_bm);
5132 spin_lock_irqsave(&sqp->qc_lock, iflags);
5135 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5139 /* Free queued command memory on heap */
5140 static void free_all_queued(void)
5143 struct sdebug_queue *sqp;
5144 struct sdebug_queued_cmd *sqcp;
5146 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5147 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5148 sqcp = &sqp->qc_arr[k];
5155 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5161 ok = stop_queued_cmnd(SCpnt);
5162 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5163 sdev_printk(KERN_INFO, SCpnt->device,
5164 "%s: command%s found\n", __func__,
5170 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5173 if (SCpnt && SCpnt->device) {
5174 struct scsi_device *sdp = SCpnt->device;
5175 struct sdebug_dev_info *devip =
5176 (struct sdebug_dev_info *)sdp->hostdata;
5178 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5179 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5181 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5186 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5188 struct sdebug_host_info *sdbg_host;
5189 struct sdebug_dev_info *devip;
5190 struct scsi_device *sdp;
5191 struct Scsi_Host *hp;
5194 ++num_target_resets;
5197 sdp = SCpnt->device;
5200 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5201 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5205 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5207 list_for_each_entry(devip,
5208 &sdbg_host->dev_info_list,
5210 if (devip->target == sdp->id) {
5211 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5215 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5216 sdev_printk(KERN_INFO, sdp,
5217 "%s: %d device(s) found in target\n", __func__, k);
5222 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5224 struct sdebug_host_info *sdbg_host;
5225 struct sdebug_dev_info *devip;
5226 struct scsi_device *sdp;
5227 struct Scsi_Host *hp;
5231 if (!(SCpnt && SCpnt->device))
5233 sdp = SCpnt->device;
5234 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5235 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5238 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5240 list_for_each_entry(devip,
5241 &sdbg_host->dev_info_list,
5243 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5248 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5249 sdev_printk(KERN_INFO, sdp,
5250 "%s: %d device(s) found in host\n", __func__, k);
5255 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5257 struct sdebug_host_info *sdbg_host;
5258 struct sdebug_dev_info *devip;
5262 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5263 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5264 spin_lock(&sdebug_host_list_lock);
5265 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5266 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5268 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5272 spin_unlock(&sdebug_host_list_lock);
5274 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5275 sdev_printk(KERN_INFO, SCpnt->device,
5276 "%s: %d device(s) found\n", __func__, k);
5280 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5282 struct msdos_partition *pp;
5283 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5284 int sectors_per_part, num_sectors, k;
5285 int heads_by_sects, start_sec, end_sec;
5287 /* assume partition table already zeroed */
5288 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5290 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5291 sdebug_num_parts = SDEBUG_MAX_PARTS;
5292 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5294 num_sectors = (int)get_sdebug_capacity();
5295 sectors_per_part = (num_sectors - sdebug_sectors_per)
5297 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5298 starts[0] = sdebug_sectors_per;
5299 max_part_secs = sectors_per_part;
5300 for (k = 1; k < sdebug_num_parts; ++k) {
5301 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5303 if (starts[k] - starts[k - 1] < max_part_secs)
5304 max_part_secs = starts[k] - starts[k - 1];
5306 starts[sdebug_num_parts] = num_sectors;
5307 starts[sdebug_num_parts + 1] = 0;
5309 ramp[510] = 0x55; /* magic partition markings */
5311 pp = (struct msdos_partition *)(ramp + 0x1be);
5312 for (k = 0; starts[k + 1]; ++k, ++pp) {
5313 start_sec = starts[k];
5314 end_sec = starts[k] + max_part_secs - 1;
5317 pp->cyl = start_sec / heads_by_sects;
5318 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5319 / sdebug_sectors_per;
5320 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5322 pp->end_cyl = end_sec / heads_by_sects;
5323 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5324 / sdebug_sectors_per;
5325 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5327 pp->start_sect = cpu_to_le32(start_sec);
5328 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5329 pp->sys_ind = 0x83; /* plain Linux partition */
5333 static void block_unblock_all_queues(bool block)
5336 struct sdebug_queue *sqp;
5338 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5339 atomic_set(&sqp->blocked, (int)block);
5342 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5343 * commands will be processed normally before triggers occur.
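 * For example, with every_nth=100 and cmnd_count=437, the count is reset
 * to 400.
 */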
5345 static void tweak_cmnd_count(void)
5349 modulo = abs(sdebug_every_nth);
5352 block_unblock_all_queues(true);
5353 count = atomic_read(&sdebug_cmnd_count);
5354 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5355 block_unblock_all_queues(false);
5358 static void clear_queue_stats(void)
5360 atomic_set(&sdebug_cmnd_count, 0);
5361 atomic_set(&sdebug_completions, 0);
5362 atomic_set(&sdebug_miss_cpus, 0);
5363 atomic_set(&sdebug_a_tsf, 0);
5366 static bool inject_on_this_cmd(void)
5368 if (sdebug_every_nth == 0)
5370 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5373 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5375 /* Complete the processing of the thread that queued a SCSI command to this
5376 * driver. It either completes the command by calling scsi_done() or
5377 * schedules an hrtimer or work queue and then returns 0. Returns
5378 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
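 * As a special case, an ndelay shorter than INCLUSIVE_TIMING_MAX_NS that
 * has already elapsed by the time the response is ready is completed in
 * the submitting thread instead of being deferred.
 */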
5380 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5382 int (*pfp)(struct scsi_cmnd *,
5383 struct sdebug_dev_info *),
5384 int delta_jiff, int ndelay)
5387 bool inject = false;
5388 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5389 int k, num_in_q, qdepth;
5390 unsigned long iflags;
5391 u64 ns_from_boot = 0;
5392 struct sdebug_queue *sqp;
5393 struct sdebug_queued_cmd *sqcp;
5394 struct scsi_device *sdp;
5395 struct sdebug_defer *sd_dp;
5397 if (unlikely(devip == NULL)) {
5398 if (scsi_result == 0)
5399 scsi_result = DID_NO_CONNECT << 16;
5400 goto respond_in_thread;
5404 if (delta_jiff == 0)
5405 goto respond_in_thread;
5407 sqp = get_queue(cmnd);
5408 spin_lock_irqsave(&sqp->qc_lock, iflags);
5409 if (unlikely(atomic_read(&sqp->blocked))) {
5410 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5411 return SCSI_MLQUEUE_HOST_BUSY;
5413 num_in_q = atomic_read(&devip->num_in_q);
5414 qdepth = cmnd->device->queue_depth;
5415 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5417 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5418 goto respond_in_thread;
5420 scsi_result = device_qfull_result;
5421 } else if (unlikely(sdebug_every_nth &&
5422 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5423 (scsi_result == 0))) {
5424 if ((num_in_q == (qdepth - 1)) &&
5425 (atomic_inc_return(&sdebug_a_tsf) >=
5426 abs(sdebug_every_nth))) {
5427 atomic_set(&sdebug_a_tsf, 0);
5429 scsi_result = device_qfull_result;
5433 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5434 if (unlikely(k >= sdebug_max_queue)) {
5435 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5437 goto respond_in_thread;
5438 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5439 scsi_result = device_qfull_result;
5440 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5441 sdev_printk(KERN_INFO, sdp,
5442 "%s: max_queue=%d exceeded, %s\n",
5443 __func__, sdebug_max_queue,
5444 (scsi_result ? "status: TASK SET FULL" :
5445 "report: host busy"));
5447 goto respond_in_thread;
5449 return SCSI_MLQUEUE_HOST_BUSY;
5451 set_bit(k, sqp->in_use_bm);
5452 atomic_inc(&devip->num_in_q);
5453 sqcp = &sqp->qc_arr[k];
5454 sqcp->a_cmnd = cmnd;
5455 cmnd->host_scribble = (unsigned char *)sqcp;
5456 sd_dp = sqcp->sd_dp;
5457 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5460 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5462 atomic_dec(&devip->num_in_q);
5463 clear_bit(k, sqp->in_use_bm);
5464 return SCSI_MLQUEUE_HOST_BUSY;
5471 /* Set the hostwide tag */
5472 if (sdebug_host_max_queue)
5473 sd_dp->hc_idx = get_tag(cmnd);
5476 ns_from_boot = ktime_get_boottime_ns();
5478 /* one of the resp_*() response functions is called here */
5479 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5480 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5481 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5482 delta_jiff = ndelay = 0;
5484 if (cmnd->result == 0 && scsi_result != 0)
5485 cmnd->result = scsi_result;
5486 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5487 if (atomic_read(&sdeb_inject_pending)) {
5488 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5489 atomic_set(&sdeb_inject_pending, 0);
5490 cmnd->result = check_condition_result;
5494 if (unlikely(sdebug_verbose && cmnd->result))
5495 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5496 __func__, cmnd->result);
5498 if (delta_jiff > 0 || ndelay > 0) {
5501 if (delta_jiff > 0) {
5502 u64 ns = jiffies_to_nsecs(delta_jiff);
5504 if (sdebug_random && ns < U32_MAX) {
5505 ns = prandom_u32_max((u32)ns);
5506 } else if (sdebug_random) {
5507 ns >>= 12; /* scale to 4 usec precision */
5508 if (ns < U32_MAX) /* over 4 hours max */
5509 ns = prandom_u32_max((u32)ns);
5512 kt = ns_to_ktime(ns);
5513 } else { /* ndelay has a 4.2 second max */
5514 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5516 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5517 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5519 if (kt <= d) { /* elapsed duration >= kt */
5520 spin_lock_irqsave(&sqp->qc_lock, iflags);
5521 sqcp->a_cmnd = NULL;
5522 atomic_dec(&devip->num_in_q);
5523 clear_bit(k, sqp->in_use_bm);
5524 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5527 /* call scsi_done() from this thread */
5531 /* otherwise reduce kt by elapsed time */
5536 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5537 spin_lock_irqsave(&sqp->qc_lock, iflags);
5538 if (!sd_dp->init_poll) {
5539 sd_dp->init_poll = true;
5540 sqcp->sd_dp = sd_dp;
5541 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5544 sd_dp->defer_t = SDEB_DEFER_POLL;
5545 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5547 if (!sd_dp->init_hrt) {
5548 sd_dp->init_hrt = true;
5549 sqcp->sd_dp = sd_dp;
5550 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5551 HRTIMER_MODE_REL_PINNED);
5552 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5553 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5556 sd_dp->defer_t = SDEB_DEFER_HRT;
5557 /* schedule the invocation of scsi_done() for a later time */
5558 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5560 if (sdebug_statistics)
5561 sd_dp->issuing_cpu = raw_smp_processor_id();
5562 } else { /* jdelay < 0, use work queue */
5563 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5564 atomic_read(&sdeb_inject_pending)))
5565 sd_dp->aborted = true;
5567 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5568 spin_lock_irqsave(&sqp->qc_lock, iflags);
5569 if (!sd_dp->init_poll) {
5570 sd_dp->init_poll = true;
5571 sqcp->sd_dp = sd_dp;
5572 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5575 sd_dp->defer_t = SDEB_DEFER_POLL;
5576 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5578 if (!sd_dp->init_wq) {
5579 sd_dp->init_wq = true;
5580 sqcp->sd_dp = sd_dp;
5581 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5583 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5585 sd_dp->defer_t = SDEB_DEFER_WQ;
5586 schedule_work(&sd_dp->ew.work);
5588 if (sdebug_statistics)
5589 sd_dp->issuing_cpu = raw_smp_processor_id();
5590 if (unlikely(sd_dp->aborted)) {
5591 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5592 scsi_cmd_to_rq(cmnd)->tag);
5593 blk_abort_request(scsi_cmd_to_rq(cmnd));
5594 atomic_set(&sdeb_inject_pending, 0);
5595 sd_dp->aborted = false;
5598 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5599 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5600 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5603 respond_in_thread: /* call back to mid-layer using invocation thread */
5604 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5605 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5606 if (cmnd->result == 0 && scsi_result != 0)
5607 cmnd->result = scsi_result;
5612 /* Note: The following macros create attribute files in the
5613 /sys/module/scsi_debug/parameters directory. Unfortunately this
5614 driver is unaware of such a change and cannot trigger auxiliary actions
5615 as it can when the corresponding attribute in the
5616 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
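For example 'echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts' runs
opts_store() below, which also refreshes derived flags such as
sdebug_verbose; writing the module parameter file directly does not.
*/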
5618 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5619 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5620 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5621 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5622 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5623 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5624 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5625 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5626 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5627 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5628 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5629 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5630 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5631 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5632 module_param_string(inq_product, sdebug_inq_product_id,
5633 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5634 module_param_string(inq_rev, sdebug_inq_product_rev,
5635 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5636 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5637 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5638 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5639 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5640 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5641 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5642 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5643 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5644 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5645 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5646 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5648 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5650 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5651 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5652 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5653 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5654 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5655 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5656 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5657 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5658 module_param_named(per_host_store, sdebug_per_host_store, bool,
5660 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5661 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5662 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5663 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5664 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5665 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5666 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5667 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5668 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5669 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5670 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5671 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5672 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5673 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5674 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5675 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5676 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5677 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5679 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5680 module_param_named(write_same_length, sdebug_write_same_length, int,
5682 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5683 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5684 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5685 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5687 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5688 MODULE_DESCRIPTION("SCSI debug adapter driver");
5689 MODULE_LICENSE("GPL");
5690 MODULE_VERSION(SDEBUG_VERSION);
5692 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5693 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5694 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5695 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5696 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5697 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5698 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5699 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5700 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5701 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5702 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5703 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5704 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5705 MODULE_PARM_DESC(host_max_queue,
5706 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5707 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5708 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5709 SDEBUG_VERSION "\")");
5710 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5711 MODULE_PARM_DESC(lbprz,
5712 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5713 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5714 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5715 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5716 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5717 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5718 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5719 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5720 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5721 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5722 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5723 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5724 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5725 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5726 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5727 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5728 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5729 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5730 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5731 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5732 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5733 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5734 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5735 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5736 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5737 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5738 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5739 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5740 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5741 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5742 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5743 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5744 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5745 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5746 MODULE_PARM_DESC(uuid_ctl,
5747 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5748 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5749 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5750 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5751 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5752 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5753 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5754 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5755 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
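/* Illustrative load-time example (values are arbitrary, not defaults):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 lbpu=1
 * creates one host with 2 targets of 4 LUs each, sharing a 256 MiB ram
 * store, with the UNMAP command enabled.
 */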
5757 #define SDEBUG_INFO_LEN 256
5758 static char sdebug_info[SDEBUG_INFO_LEN];
5760 static const char *scsi_debug_info(struct Scsi_Host *shp)
5764 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5765 my_name, SDEBUG_VERSION, sdebug_version_date);
5766 if (k >= (SDEBUG_INFO_LEN - 1))
5768 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5769 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5770 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5771 "statistics", (int)sdebug_statistics);
5775 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5776 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5781 int minLen = length > 15 ? 15 : length;
5783 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5785 memcpy(arr, buffer, minLen);
5787 if (1 != sscanf(arr, "%d", &opts))
5790 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5791 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5792 if (sdebug_every_nth != 0)
5797 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5798 * same for each scsi_debug host (if more than one). Some of the counters
5799 * in the output are not atomic so they might be inaccurate on a busy system. */
5800 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5803 struct sdebug_queue *sqp;
5804 struct sdebug_host_info *sdhp;
5806 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5807 SDEBUG_VERSION, sdebug_version_date);
5808 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5809 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5810 sdebug_opts, sdebug_every_nth);
5811 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5812 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5813 sdebug_sector_size, "bytes");
5814 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5815 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5817 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5818 num_dev_resets, num_target_resets, num_bus_resets,
5820 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5821 dix_reads, dix_writes, dif_errors);
5822 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5824 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5825 atomic_read(&sdebug_cmnd_count),
5826 atomic_read(&sdebug_completions),
5827 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5828 atomic_read(&sdebug_a_tsf),
5829 atomic_read(&sdeb_mq_poll_count));
5831 seq_printf(m, "submit_queues=%d\n", submit_queues);
5832 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5833 seq_printf(m, " queue %d:\n", j);
5834 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5835 if (f != sdebug_max_queue) {
5836 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5837 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5838 "first,last bits", f, l);
5842 seq_printf(m, "this host_no=%d\n", host->host_no);
5843 if (!xa_empty(per_store_ap)) {
5846 unsigned long l_idx;
5847 struct sdeb_store_info *sip;
5849 seq_puts(m, "\nhost list:\n");
5851 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5853 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5854 sdhp->shost->host_no, idx);
5857 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5858 sdeb_most_recent_idx);
5860 xa_for_each(per_store_ap, l_idx, sip) {
5861 niu = xa_get_mark(per_store_ap, l_idx,
5862 SDEB_XA_NOT_IN_USE);
5864 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5865 (niu ? " not_in_use" : ""));
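/*
 * Illustrative sketch (not in the original source): the output above is
 * read with, e.g.:
 *
 *     cat /proc/scsi/scsi_debug/<host_no>
 *
 * where <host_no> matches the "this host_no=" line. Counters such as
 * cmnd_count and mq_polls are only maintained when statistics=1.
 */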
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
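/*
 * Illustrative sketch (not in the original source; assumes sysfs mounted
 * at /sys): delay and ndelay are two views of the same response latency,
 * in jiffies and nanoseconds respectively:
 *
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay       # 2 jiffies
 *     echo 50000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay  # 50 us; also
 *                                  # sets sdebug_jdelay to JDELAY_OVERRIDDEN
 *
 * Either store fails with -EBUSY while any command is still queued.
 */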
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	/* Cannot change from or to TYPE_ZBC with sysfs */
	if (sdebug_ptype == TYPE_ZBC)
		return -EINVAL;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n == TYPE_ZBC)
			return -EINVAL;
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);
static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);
static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
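/*
 * Worked example (illustrative, not from the original source): if max_queue
 * shrinks from 64 to 16 while a command with tag 40 is still in flight,
 * find_last_bit() above yields k == 40, so retired_max_queue becomes 41
 * and the completion paths shrink it back toward 0 as the commands in the
 * "retired" region [16, 41) finish.
 */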
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
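/*
 * Illustrative usage sketch (not in the original source): simulated hosts
 * can be added and removed at runtime; positive deltas add, negative
 * deltas remove:
 *
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * With per_host_store=1 (and fake_rw=0) a new host first tries to re-use a
 * store marked SDEB_XA_NOT_IN_USE before allocating a fresh one.
 */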
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);
static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}
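/*
 * Example (illustrative): sdeb_zbc_model_str() accepts any of the three
 * spellings per model, so "host-managed", "managed" and "2" all map to
 * BLK_ZONED_HM; an unrecognized string yields -EINVAL.
 */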
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
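/*
 * Illustrative sketch (assumption: default sysfs layout): these attributes
 * appear as, e.g., /sys/bus/pseudo/drivers/scsi_debug/every_nth, whereas
 * the plain module parameters surface under
 * /sys/module/scsi_debug/parameters/every_nth without side effects.
 */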
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		} else {
			sdebug_lun_am = SAM_LUN_AM_FLAT;
		}
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
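/*
 * Illustrative load sketch (not in the original source; the parameter names
 * are real module parameters of this driver, the values are arbitrary
 * examples):
 *
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 add_host=2
 *
 * scsi_debug_init() validates the parameters, builds the shared ram store
 * (unless fake_rw=1), then registers the pseudo bus/driver and adds
 * 'add_host' simulated hosts.
 */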
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
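/*
 * Sizing example (illustrative, not from the original source): with
 * dev_size_mb=16 and sector_size=512, sz is 16 MiB and
 * sdebug_store_sectors is 32768; if dix is enabled the PI array adds
 * sdebug_store_sectors * sizeof(struct t10_pi_tuple), i.e. 8 bytes per
 * logical block, on top of the vzalloc'd data store.
 */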
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
static int sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return 0;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}

	return 0;
}
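/*
 * Worked example (illustrative, not from the original source): with
 * submit_queues=4 and poll_queues=1, HCTX_TYPE_DEFAULT gets nr_queues=3 at
 * queue_offset 0 and HCTX_TYPE_POLL gets nr_queues=1 at queue_offset 3;
 * HCTX_TYPE_READ stays empty and is skipped by the !map->nr_queues test.
 */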
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;
	spin_lock_irqsave(&sqp->qc_lock, iflags);

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
		if (first) {
			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
			first = false;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (unlikely(qc_idx >= sdebug_max_queue))
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;

		} else		/* ignoring non REQ_POLLED requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
			       sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		sd_dp->defer_t = SDEB_DEFER_NONE;
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scsi_done(scp); /* callback to mid level */
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		num_entries++;
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		return -ENODEV;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};